/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif
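/*
 * For illustration only: with USELABELS defined, CASE(_iadd) expands to the local
 * label "opc_iadd" and dispatch is a computed goto through the opclabels_data
 * table declared further down; without it, the same handler is reached as
 * "case Bytecodes::_iadd" inside an ordinary switch (opcode) statement. Roughly:
 *
 *   CASE(_iadd):   // USELABELS:  opc_iadd:                 (target of DISPATCH)
 *                  // otherwise:  case Bytecodes::_iadd:    (arm of the switch)
 */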
/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no live
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT \
    { \
       /* zap freed handles rather than GC'ing them */ \
       HandleMarkCleaner __hmc(THREAD); \
       CALL_VM(SafepointMechanism::block_if_requested(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    DECACHE_STATE(); \
    SET_LAST_JAVA_FRAME(); \
    { \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
       ThreadInVMfromJava trans(THREAD); \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
    } \
    RESET_LAST_JAVA_FRAME(); \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap) \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
{ \
    BytecodeCounter::_counter_value++; \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) { \
      CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \
                                                       topOfStack[Interpreter::expr_index_at(1)], \
                                                       topOfStack[Interpreter::expr_index_at(2)]), \
                                                       handle_exception); \
    } \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result if we call at_single_stepping_point() we refetch opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY() \
{ \
    if (_jvmti_interp_events) { \
      if (JvmtiExport::should_post_single_step()) { \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        ThreadInVMfromJava trans(THREAD); \
        JvmtiExport::at_single_stepping_point(THREAD, \
                                              istate->method(), \
                                              pc); \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE(); \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        } \
        opcode = *pc; \
      } \
    } \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
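/*
 * Orientation note for the dispatch macros that follow (approximate; the macro
 * definitions below are authoritative): in the USELABELS build each handler ends
 * with DISPATCH(opcode), i.e. a computed goto straight to the next handler, while
 * the portable build goes back to the top of the interpreter's while loop with
 * "continue" or "goto do_continue", optionally prefetching the next opcode first
 * when PREFETCH_OPCCODE is defined.
 */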
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#else
#define CONTINUE { \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager.
#define UPDATE_PC_AND_RETURN(opsize) \
   DECACHE_TOS(); \
   istate->set_bcp(pc+opsize); \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res) \
  res = METHOD->method_counters(); \
  if (res == NULL) { \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.
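// Rough shape of an on-stack replacement (OSR) request issued from
// DO_BACKEDGE_CHECKS below (orientation only; the macro is authoritative):
//   1. a backwards branch increments the backedge counter;
//   2. on overflow, OSR_REQUEST calls InterpreterRuntime::frequency_counter_overflow;
//   3. if that yields an nmethod that is_in_use(), the interpreter packs its frame
//      with SharedRuntime::OSR_migration_begin and returns to the frame manager
//      with msg set to do_osr so execution can continue at the compiled OSR entry.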
// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc) \
    if ((skip) <= 0) { \
      MethodCounters* mcs; \
      GET_METHOD_COUNTERS(mcs); \
      if (UseLoopCounter) { \
        bool do_OSR = UseOnStackReplacement; \
        mcs->backedge_counter()->increment(); \
        if (ProfileInterpreter) { \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
          /* Check for overflow against MDO count. */ \
          do_OSR = do_OSR \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit) \
            /* When ProfileInterpreter is on, the backedge_count comes     */ \
            /* from the methodDataOop, which value does not get reset on   */ \
            /* the call to frequency_counter_overflow(). To avoid          */ \
            /* excessive calls to the overflow routine while the method is */ \
            /* being compiled, add a second test to make sure the overflow */ \
            /* function is called only once every overflow_frequency.      */ \
            && (!(mdo_last_branch_taken_count & 1023)); \
        } else { \
          /* check for overflow of backedge counter */ \
          do_OSR = do_OSR \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        } \
        if (do_OSR) { \
          nmethod* osr_nmethod; \
          OSR_REQUEST(osr_nmethod, branch_pc); \
          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) { \
            intptr_t* buf; \
            /* Call OSR migration with last java frame only, no checks. */ \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
            istate->set_msg(do_osr); \
            istate->set_osr_buf((address)buf); \
            istate->set_osr_entry(osr_nmethod->osr_entry()); \
            return; \
          } \
        } \
      }  /* UseCompiler ... */ \
      SAFEPOINT; \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()     istate->set_bcp(pc);
#define CACHE_PC()       pc = istate->bcp();
#define CACHE_CP()       cp = istate->constants();
#define CACHE_LOCALS()   locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()            ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
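// Why the cache/decache pairs above matter: pc and topOfStack normally live in
// local variables (ideally registers) and are written back into *istate only when
// the VM may observe this frame, e.g. before a VM call, a safepoint, or anything
// that can GC or walk the stack; afterwards they are reloaded in case the frame
// was updated. The CALL_VM* macros further down bundle up this pattern.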
/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_) \
        if ((obj_) == NULL) { \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        } \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE() \
        CACHE_TOS(); \
        CACHE_PC(); \
        CACHE_CP(); \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func) \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        func; \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func) \
        CALL_VM_NAKED_LJF(func) \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) { \
          CALL_VM_NOCHECK(func); \
          if (THREAD->has_pending_exception()) goto label; \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
  interpreterState orig = istate;
#endif
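  // The hot interpreter state is kept in the locals declared below and mirrored
  // into *istate only when needed (see the DECACHE_*/CACHE_* macros above):
  //   topOfStack - expression stack top; the STACK_* macros index relative to it
  //   pc         - bytecode pointer into the current method
  //   locals     - base of the local variable slots
  //   cp         - the ConstantPoolCache
  //   THREAD     - the owning JavaThread (a macro unless LOTS_OF_REGS is defined)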
  register intptr_t*          topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address            pc = istate->bcp();
  register jubyte             opcode;
  register intptr_t*          locals = istate->locals();
  register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*        THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif

#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,        &&opc_aconst_null, &&opc_iconst_m1, &&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,   &&opc_iconst_2,    &&opc_iconst_3,  &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,   &&opc_lconst_0,    &&opc_lconst_1,  &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,   &&opc_fconst_2,    &&opc_dconst_0,  &&opc_dconst_1,

/* 0x10 */ &&opc_bipush,     &&opc_sipush,      &&opc_ldc,       &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w,     &&opc_iload,       &&opc_lload,     &&opc_fload,
/* 0x18 */ &&opc_dload,      &&opc_aload,       &&opc_iload_0,   &&opc_iload_1,
/* 0x1C */ &&opc_iload_2,    &&opc_iload_3,     &&opc_lload_0,   &&opc_lload_1,

/* 0x20 */ &&opc_lload_2,    &&opc_lload_3,     &&opc_fload_0,   &&opc_fload_1,
/* 0x24 */ &&opc_fload_2,    &&opc_fload_3,     &&opc_dload_0,   &&opc_dload_1,
/* 0x28 */ &&opc_dload_2,    &&opc_dload_3,     &&opc_aload_0,   &&opc_aload_1,
/* 0x2C */ &&opc_aload_2,    &&opc_aload_3,     &&opc_iaload,    &&opc_laload,

/* 0x30 */ &&opc_faload,     &&opc_daload,      &&opc_aaload,    &&opc_baload,
/* 0x34 */ &&opc_caload,     &&opc_saload,      &&opc_istore,    &&opc_lstore,
/* 0x38 */ &&opc_fstore,     &&opc_dstore,      &&opc_astore,    &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,   &&opc_istore_2,    &&opc_istore_3,  &&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,   &&opc_lstore_2,    &&opc_lstore_3,  &&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,   &&opc_fstore_2,    &&opc_fstore_3,  &&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,   &&opc_dstore_2,    &&opc_dstore_3,  &&opc_astore_0,
/* 0x4C */ &&opc_astore_1,   &&opc_astore_2,    &&opc_astore_3,  &&opc_iastore,

/* 0x50 */ &&opc_lastore,    &&opc_fastore,     &&opc_dastore,   &&opc_aastore,
/* 0x54 */ &&opc_bastore,    &&opc_castore,     &&opc_sastore,   &&opc_pop,
/* 0x58 */ &&opc_pop2,       &&opc_dup,         &&opc_dup_x1,    &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,       &&opc_dup2_x1,     &&opc_dup2_x2,   &&opc_swap,

/* 0x60 */ &&opc_iadd,       &&opc_ladd,        &&opc_fadd,      &&opc_dadd,
/* 0x64 */ &&opc_isub,       &&opc_lsub,        &&opc_fsub,      &&opc_dsub,
/* 0x68 */ &&opc_imul,       &&opc_lmul,        &&opc_fmul,      &&opc_dmul,
/* 0x6C */ &&opc_idiv,       &&opc_ldiv,        &&opc_fdiv,      &&opc_ddiv,

/* 0x70 */ &&opc_irem,       &&opc_lrem,        &&opc_frem,      &&opc_drem,
/* 0x74 */ &&opc_ineg,       &&opc_lneg,        &&opc_fneg,      &&opc_dneg,
/* 0x78 */ &&opc_ishl,       &&opc_lshl,        &&opc_ishr,      &&opc_lshr,
/* 0x7C */ &&opc_iushr,      &&opc_lushr,       &&opc_iand,      &&opc_land,

/* 0x80 */ &&opc_ior,        &&opc_lor,         &&opc_ixor,      &&opc_lxor,
/* 0x84 */ &&opc_iinc,       &&opc_i2l,         &&opc_i2f,       &&opc_i2d,
/* 0x88 */ &&opc_l2i,        &&opc_l2f,         &&opc_l2d,       &&opc_f2i,
/* 0x8C */ &&opc_f2l,        &&opc_f2d,         &&opc_d2i,       &&opc_d2l,

/* 0x90 */ &&opc_d2f,        &&opc_i2b,         &&opc_i2c,       &&opc_i2s,
/* 0x94 */ &&opc_lcmp,       &&opc_fcmpl,       &&opc_fcmpg,     &&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,      &&opc_ifeq,        &&opc_ifne,      &&opc_iflt,
/* 0x9C */ &&opc_ifge,       &&opc_ifgt,        &&opc_ifle,      &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,  &&opc_if_icmplt,   &&opc_if_icmpge, &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,  &&opc_if_acmpeq,   &&opc_if_acmpne, &&opc_goto,
/* 0xA8 */ &&opc_jsr,        &&opc_ret,         &&opc_tableswitch, &&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,    &&opc_lreturn,     &&opc_freturn,   &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,    &&opc_return,      &&opc_getstatic, &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,   &&opc_putfield,    &&opc_invokevirtual, &&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic, &&opc_invokeinterface, &&opc_invokedynamic, &&opc_new,
/* 0xBC */ &&opc_newarray,   &&opc_anewarray,   &&opc_arraylength, &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,  &&opc_instanceof,  &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,       &&opc_multianewarray, &&opc_ifnull,  &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,     &&opc_jsr_w,       &&opc_breakpoint, &&opc_default,
/* 0xCC */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,

/* 0xD0 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xD4 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xD8 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xDC */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,

/* 0xE0 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xE4 */ &&opc_default,    &&opc_default,     &&opc_fast_aldc, &&opc_fast_aldc_w,
/* 0xE8 */ &&opc_return_register_finalizer,
           &&opc_invokehandle, &&opc_default,   &&opc_default,
/* 0xEC */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,

/* 0xF0 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xF4 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xF8 */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default,
/* 0xFC */ &&opc_default,    &&opc_default,     &&opc_default,   &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */

#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
#if COMPILER2_OR_JVMCI
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
#endif
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);

          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }
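        // If none of the biased-locking fast paths above claimed the lock, fall back
        // to stack locking (sketched by the code below): store the unlocked mark word
        // into the BasicLock as the displaced header and CAS a pointer to the
        // BasicObjectLock into the object's mark word; recursive and contended cases
        // are handled in the VM via InterpreterRuntime::monitorenter.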
        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has the result;
      // all we need to do is skip across the bytecode and continue (assuming
      // there is no exception pending).
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // derefing's lockee ought to provoke implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }
      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value) \
      CASE(opcode): \
          SET_STACK_ ## const_type(value, 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,   -1);
          OPC_CONST_n(_iconst_0,    INT,    0);
          OPC_CONST_n(_iconst_1,    INT,    1);
          OPC_CONST_n(_iconst_2,    INT,    2);
          OPC_CONST_n(_iconst_3,    INT,    3);
          OPC_CONST_n(_iconst_4,    INT,    4);
          OPC_CONST_n(_iconst_5,    INT,    5);
          OPC_CONST_n(_fconst_0,    FLOAT,  0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,  1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,  2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind) \
      CASE(_##opcname): \
      { \
          SET_STACK_ ## kind(VM##key##Const##value(), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      }
          OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
          OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
          OPC_CONST2_n(lconst_0, Zero, long,   LONG);
          OPC_CONST2_n(lconst_1, One,  long,   LONG);

          /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
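          /* A note on the conventions used throughout the handlers below: the
           * STACK_* macros read relative to topOfStack (-1 is the current top),
           * SET_STACK_xxx(v, n) writes at offset n, and
           * UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) advances pc by the
           * bytecode length and topOfStack by the net stack effect. Longs and
           * doubles occupy two expression-stack slots, which is why their
           * handlers adjust the stack by 2.
           */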
          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num) \
      CASE(_aload_##num): \
          VERIFY_OOP(LOCALS_OBJECT(num)); \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
\
      CASE(_iload_##num): \
      CASE(_fload_##num): \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
\
      CASE(_lload_##num): \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      CASE(_dload_##num): \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // Wide and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num) \
      CASE(_astore_##num): \
          astore(topOfStack, -1, locals, num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_istore_##num): \
      CASE(_fstore_##num): \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num) \
      CASE(_dstore_##num): \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      CASE(_lstore_##num): \
          SET_LOCALS_LONG(STACK_LONG(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test) \
      CASE(_i##opcname): \
          if (test && (STACK_INT(-1) == 0)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap); \
          } \
          SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                      STACK_INT(-1)), \
                                      -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
          if (test) { \
            jlong l1 = STACK_LONG(-1); \
            if (VMlongEqz(l1)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap); \
            } \
          } \
          /* First long at (-1,-2) next long at (-3,-4) */ \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
                                        STACK_LONG(-1)), \
                                        -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);
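      /* The third OPC_INT_BINARY argument enables the divide-by-zero check; for
       * example OPC_INT_BINARY(div, Div, 1) expands (roughly, int case only) to:
       *
       *   case Bytecodes::_idiv:
       *     if (STACK_INT(-1) == 0) {
       *       VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(),
       *                     "/ by zero", note_div0Check_trap);
       *     }
       *     SET_STACK_INT(VMintDiv(STACK_INT(-2), STACK_INT(-1)), -2);
       *
       * This is an illustrative expansion only; the macro above is authoritative.
       */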
      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname) \
      CASE(_d##opcname): { \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
                                            STACK_DOUBLE(-1)), \
                                            -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      } \
      CASE(_f##opcname): \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
                                          STACK_FLOAT(-1)), \
                                          -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname) \
      CASE(_i##opcname): \
          SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                      STACK_INT(-1)), \
                                      -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
                                        STACK_INT(-1)), \
                                        -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

      /* negate the value on the top of the stack */

      CASE(_ineg):
          SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
          SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
          SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
          SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
          SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }
      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong??)
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i):       /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f):       /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l):       /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */


#define COMPARISON_OP(name, comparison) \
      CASE(_if_icmp##name): { \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      } \
      CASE(_if##name): { \
          const bool cmp = (STACK_INT(-1) comparison 0); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }
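      /* In the branch macros, "skip" is the signed 16-bit branch offset when the
       * comparison succeeds and 3 (the size of the if<cond> bytecode) when it fails,
       * so an untaken branch simply falls through. DO_BACKEDGE_CHECKS only does real
       * work for skip <= 0, i.e. backwards branches, which is where safepoints and
       * OSR requests are taken.
       */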
#define COMPARISON_OP2(name, comparison) \
      COMPARISON_OP(name, comparison) \
      CASE(_if_acmp##name): { \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_NOT_OP(name) \
      CASE(_if##name): { \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_OP(name) \
      CASE(_if##name): { \
          const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
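      /* fcmpl and fcmpg differ only when an operand is NaN: the last argument to
       * VMfloatCompare selects whether the unordered result is -1 (fcmpl) or +1
       * (fcmpg), as required by the JVM specification. The dcmpl/dcmpg pair below
       * works the same way on doubles.
       */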
      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff) \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
      jint     index  = STACK_INT(arrayOff + 1); \
      char message[jintAsStringSize]; \
      CHECK_NULL(arrObj); \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
          sprintf(message, "%d", index); \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message, note_rangeCheck_trap); \
      }

      /* 32-bit loads. These handle conversion from < 32-bit types */
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
      { \
          ARRAY_INTRO(-2); \
          (void)extra; \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      }

      /* 64-bit loads */
#define ARRAY_LOADTO64(T,T2, stackRes, extra) \
      { \
          ARRAY_INTRO(-2); \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          (void)extra; \
          UPDATE_PC_AND_CONTINUE(1); \
      }

      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      CASE(_aaload): {
          ARRAY_INTRO(-2);
          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
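      /* The array store handlers that follow reuse ARRAY_INTRO, but with the array
       * reference deeper in the stack (-3 for one-slot values, -4 for longs and
       * doubles) because the value to store sits above arrayref and index.
       * ARRAY_INTRO performs the null check and the bounds check, raising
       * ArrayIndexOutOfBoundsException with the offending index as the message.
       */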
These handle conversion to < 32-bit types */ 1691 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1692 { \ 1693 ARRAY_INTRO(-3); \ 1694 (void)extra; \ 1695 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1696 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1697 } 1698 1699 /* 64-bit stores */ 1700 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1701 { \ 1702 ARRAY_INTRO(-4); \ 1703 (void)extra; \ 1704 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1705 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1706 } 1707 1708 CASE(_iastore): 1709 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1710 CASE(_fastore): 1711 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1712 /* 1713 * This one looks different because of the assignability check 1714 */ 1715 CASE(_aastore): { 1716 oop rhsObject = STACK_OBJECT(-1); 1717 VERIFY_OOP(rhsObject); 1718 ARRAY_INTRO( -3); 1719 // arrObj, index are set 1720 if (rhsObject != NULL) { 1721 /* Check assignability of rhsObject into arrObj */ 1722 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1723 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1724 // 1725 // Check for compatibilty. This check must not GC!! 1726 // Seems way more expensive now that we must dispatch 1727 // 1728 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1729 // Decrement counter if subtype check failed. 1730 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1731 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1732 } 1733 // Profile checkcast with null_seen and receiver. 1734 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1735 } else { 1736 // Profile checkcast with null_seen and receiver. 
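// Storing a null element is always legal for aastore (the JVMS assignability
// check applies only to non-null values), so this branch needs no subtype test;
// it only records null_seen in the profile before the store below.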
1737 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1738 } 1739 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1740 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1741 } 1742 CASE(_bastore): { 1743 ARRAY_INTRO(-3); 1744 int item = STACK_INT(-1); 1745 // if it is a T_BOOLEAN array, mask the stored value to 0/1 1746 if (arrObj->klass() == Universe::boolArrayKlassObj()) { 1747 item &= 1; 1748 } else { 1749 assert(arrObj->klass() == Universe::byteArrayKlassObj(), 1750 "should be byte array otherwise"); 1751 } 1752 ((typeArrayOop)arrObj)->byte_at_put(index, item); 1753 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1754 } 1755 CASE(_castore): 1756 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1757 CASE(_sastore): 1758 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1759 CASE(_lastore): 1760 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1761 CASE(_dastore): 1762 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1763 1764 CASE(_arraylength): 1765 { 1766 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1767 CHECK_NULL(ary); 1768 SET_STACK_INT(ary->length(), -1); 1769 UPDATE_PC_AND_CONTINUE(1); 1770 } 1771 1772 /* monitorenter and monitorexit for locking/unlocking an object */ 1773 1774 CASE(_monitorenter): { 1775 oop lockee = STACK_OBJECT(-1); 1776 // derefing's lockee ought to provoke implicit null check 1777 CHECK_NULL(lockee); 1778 // find a free monitor or one already allocated for this object 1779 // if we find a matching object then we need a new monitor 1780 // since this is recursive enter 1781 BasicObjectLock* limit = istate->monitor_base(); 1782 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1783 BasicObjectLock* entry = NULL; 1784 while (most_recent != limit ) { 1785 if (most_recent->obj() == NULL) entry = most_recent; 1786 else if (most_recent->obj() == lockee) break; 1787 most_recent++; 1788 } 1789 if (entry != NULL) { 1790 entry->set_obj(lockee); 1791 int success = false; 1792 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1793 1794 markOop mark = lockee->mark(); 1795 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1796 // implies UseBiasedLocking 1797 if (mark->has_bias_pattern()) { 1798 uintptr_t thread_ident; 1799 uintptr_t anticipated_bias_locking_value; 1800 thread_ident = (uintptr_t)istate->thread(); 1801 anticipated_bias_locking_value = 1802 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1803 ~((uintptr_t) markOopDesc::age_mask_in_place); 1804 1805 if (anticipated_bias_locking_value == 0) { 1806 // already biased towards this thread, nothing to do 1807 if (PrintBiasedLockingStatistics) { 1808 (* BiasedLocking::biased_lock_entry_count_addr())++; 1809 } 1810 success = true; 1811 } 1812 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1813 // try revoke bias 1814 markOop header = lockee->klass()->prototype_header(); 1815 if (hash != markOopDesc::no_hash) { 1816 header = header->copy_set_hash(hash); 1817 } 1818 if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) { 1819 if (PrintBiasedLockingStatistics) 1820 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1821 } 1822 } 1823 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1824 // try rebias 1825 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1826 if (hash != markOopDesc::no_hash) { 1827 new_header = new_header->copy_set_hash(hash); 1828 } 1829 if (Atomic::cmpxchg(new_header, 
lockee->mark_addr(), mark) == mark) { 1830 if (PrintBiasedLockingStatistics) 1831 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1832 } 1833 else { 1834 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1835 } 1836 success = true; 1837 } 1838 else { 1839 // try to bias towards thread in case object is anonymously biased 1840 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1841 (uintptr_t)markOopDesc::age_mask_in_place | 1842 epoch_mask_in_place)); 1843 if (hash != markOopDesc::no_hash) { 1844 header = header->copy_set_hash(hash); 1845 } 1846 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1847 // debugging hint 1848 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1849 if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) { 1850 if (PrintBiasedLockingStatistics) 1851 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1852 } 1853 else { 1854 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1855 } 1856 success = true; 1857 } 1858 } 1859 1860 // traditional lightweight locking 1861 if (!success) { 1862 markOop displaced = lockee->mark()->set_unlocked(); 1863 entry->lock()->set_displaced_header(displaced); 1864 bool call_vm = UseHeavyMonitors; 1865 if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) { 1866 // Is it simple recursive case? 1867 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1868 entry->lock()->set_displaced_header(NULL); 1869 } else { 1870 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1871 } 1872 } 1873 } 1874 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1875 } else { 1876 istate->set_msg(more_monitors); 1877 UPDATE_PC_AND_RETURN(0); // Re-execute 1878 } 1879 } 1880 1881 CASE(_monitorexit): { 1882 oop lockee = STACK_OBJECT(-1); 1883 CHECK_NULL(lockee); 1884 // derefing's lockee ought to provoke implicit null check 1885 // find our monitor slot 1886 BasicObjectLock* limit = istate->monitor_base(); 1887 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1888 while (most_recent != limit ) { 1889 if ((most_recent)->obj() == lockee) { 1890 BasicLock* lock = most_recent->lock(); 1891 markOop header = lock->displaced_header(); 1892 most_recent->set_obj(NULL); 1893 if (!lockee->mark()->has_bias_pattern()) { 1894 bool call_vm = UseHeavyMonitors; 1895 // If it isn't recursive we either must swap old header or call the runtime 1896 if (header != NULL || call_vm) { 1897 markOop old_header = markOopDesc::encode(lock); 1898 if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) { 1899 // restore object for the slow case 1900 most_recent->set_obj(lockee); 1901 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1902 } 1903 } 1904 } 1905 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1906 } 1907 most_recent++; 1908 } 1909 // Need to throw illegal monitor state exception 1910 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1911 ShouldNotReachHere(); 1912 } 1913 1914 /* All of the non-quick opcodes. */ 1915 1916 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1917 * constant pool index in the instruction. 
1918 */ 1919 CASE(_getfield): 1920 CASE(_getstatic): 1921 { 1922 u2 index; 1923 ConstantPoolCacheEntry* cache; 1924 index = Bytes::get_native_u2(pc+1); 1925 1926 // QQQ Need to make this as inlined as possible. Probably need to 1927 // split all the bytecode cases out so c++ compiler has a chance 1928 // for constant prop to fold everything possible away. 1929 1930 cache = cp->entry_at(index); 1931 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1932 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1933 handle_exception); 1934 cache = cp->entry_at(index); 1935 } 1936 1937 #ifdef VM_JVMTI 1938 if (_jvmti_interp_events) { 1939 int *count_addr; 1940 oop obj; 1941 // Check to see if a field modification watch has been set 1942 // before we take the time to call into the VM. 1943 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1944 if ( *count_addr > 0 ) { 1945 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1946 obj = (oop)NULL; 1947 } else { 1948 obj = (oop) STACK_OBJECT(-1); 1949 VERIFY_OOP(obj); 1950 } 1951 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1952 obj, 1953 cache), 1954 handle_exception); 1955 } 1956 } 1957 #endif /* VM_JVMTI */ 1958 1959 oop obj; 1960 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1961 Klass* k = cache->f1_as_klass(); 1962 obj = k->java_mirror(); 1963 MORE_STACK(1); // Assume single slot push 1964 } else { 1965 obj = (oop) STACK_OBJECT(-1); 1966 CHECK_NULL(obj); 1967 } 1968 1969 // 1970 // Now store the result on the stack 1971 // 1972 TosState tos_type = cache->flag_state(); 1973 int field_offset = cache->f2_as_index(); 1974 if (cache->is_volatile()) { 1975 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 1976 OrderAccess::fence(); 1977 } 1978 if (tos_type == atos) { 1979 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1980 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1981 } else if (tos_type == itos) { 1982 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1983 } else if (tos_type == ltos) { 1984 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 1985 MORE_STACK(1); 1986 } else if (tos_type == btos || tos_type == ztos) { 1987 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 1988 } else if (tos_type == ctos) { 1989 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 1990 } else if (tos_type == stos) { 1991 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 1992 } else if (tos_type == ftos) { 1993 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 1994 } else { 1995 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 1996 MORE_STACK(1); 1997 } 1998 } else { 1999 if (tos_type == atos) { 2000 VERIFY_OOP(obj->obj_field(field_offset)); 2001 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2002 } else if (tos_type == itos) { 2003 SET_STACK_INT(obj->int_field(field_offset), -1); 2004 } else if (tos_type == ltos) { 2005 SET_STACK_LONG(obj->long_field(field_offset), 0); 2006 MORE_STACK(1); 2007 } else if (tos_type == btos || tos_type == ztos) { 2008 SET_STACK_INT(obj->byte_field(field_offset), -1); 2009 } else if (tos_type == ctos) { 2010 SET_STACK_INT(obj->char_field(field_offset), -1); 2011 } else if (tos_type == stos) { 2012 SET_STACK_INT(obj->short_field(field_offset), -1); 2013 } else if (tos_type == ftos) { 2014 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2015 } else { 2016 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2017 MORE_STACK(1); 2018 } 2019 } 2020 2021 UPDATE_PC_AND_CONTINUE(3); 2022 
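// Note on the volatile path above: the *_field_acquire loads carry acquire
// semantics, and support_IRIW_for_not_multiple_copy_atomic_cpu adds a leading
// OrderAccess::fence() so that independent reads of independent writes stay
// ordered on platforms that are not multiple-copy atomic.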
} 2023 2024 CASE(_putfield): 2025 CASE(_putstatic): 2026 { 2027 u2 index = Bytes::get_native_u2(pc+1); 2028 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2029 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2030 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2031 handle_exception); 2032 cache = cp->entry_at(index); 2033 } 2034 2035 #ifdef VM_JVMTI 2036 if (_jvmti_interp_events) { 2037 int *count_addr; 2038 oop obj; 2039 // Check to see if a field modification watch has been set 2040 // before we take the time to call into the VM. 2041 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2042 if ( *count_addr > 0 ) { 2043 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2044 obj = (oop)NULL; 2045 } 2046 else { 2047 if (cache->is_long() || cache->is_double()) { 2048 obj = (oop) STACK_OBJECT(-3); 2049 } else { 2050 obj = (oop) STACK_OBJECT(-2); 2051 } 2052 VERIFY_OOP(obj); 2053 } 2054 2055 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2056 obj, 2057 cache, 2058 (jvalue *)STACK_SLOT(-1)), 2059 handle_exception); 2060 } 2061 } 2062 #endif /* VM_JVMTI */ 2063 2064 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2065 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2066 2067 oop obj; 2068 int count; 2069 TosState tos_type = cache->flag_state(); 2070 2071 count = -1; 2072 if (tos_type == ltos || tos_type == dtos) { 2073 --count; 2074 } 2075 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2076 Klass* k = cache->f1_as_klass(); 2077 obj = k->java_mirror(); 2078 } else { 2079 --count; 2080 obj = (oop) STACK_OBJECT(count); 2081 CHECK_NULL(obj); 2082 } 2083 2084 // 2085 // Now store the result 2086 // 2087 int field_offset = cache->f2_as_index(); 2088 if (cache->is_volatile()) { 2089 if (tos_type == itos) { 2090 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2091 } else if (tos_type == atos) { 2092 VERIFY_OOP(STACK_OBJECT(-1)); 2093 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2094 } else if (tos_type == btos) { 2095 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2096 } else if (tos_type == ztos) { 2097 int bool_field = STACK_INT(-1); // only store LSB 2098 obj->release_byte_field_put(field_offset, (bool_field & 1)); 2099 } else if (tos_type == ltos) { 2100 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2101 } else if (tos_type == ctos) { 2102 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2103 } else if (tos_type == stos) { 2104 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2105 } else if (tos_type == ftos) { 2106 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2107 } else { 2108 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2109 } 2110 OrderAccess::storeload(); 2111 } else { 2112 if (tos_type == itos) { 2113 obj->int_field_put(field_offset, STACK_INT(-1)); 2114 } else if (tos_type == atos) { 2115 VERIFY_OOP(STACK_OBJECT(-1)); 2116 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2117 } else if (tos_type == btos) { 2118 obj->byte_field_put(field_offset, STACK_INT(-1)); 2119 } else if (tos_type == ztos) { 2120 int bool_field = STACK_INT(-1); // only store LSB 2121 obj->byte_field_put(field_offset, (bool_field & 1)); 2122 } else if (tos_type == ltos) { 2123 obj->long_field_put(field_offset, STACK_LONG(-1)); 2124 } else if (tos_type == ctos) { 2125 obj->char_field_put(field_offset, STACK_INT(-1)); 2126 } else if (tos_type == 
stos) { 2127 obj->short_field_put(field_offset, STACK_INT(-1)); 2128 } else if (tos_type == ftos) { 2129 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2130 } else { 2131 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2132 } 2133 } 2134 2135 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2136 } 2137 2138 CASE(_new): { 2139 u2 index = Bytes::get_Java_u2(pc+1); 2140 ConstantPool* constants = istate->method()->constants(); 2141 if (!constants->tag_at(index).is_unresolved_klass()) { 2142 // Make sure klass is initialized and doesn't have a finalizer 2143 Klass* entry = constants->resolved_klass_at(index); 2144 InstanceKlass* ik = InstanceKlass::cast(entry); 2145 if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2146 size_t obj_size = ik->size_helper(); 2147 oop result = NULL; 2148 // If the TLAB isn't pre-zeroed then we'll have to do it 2149 bool need_zero = !ZeroTLAB; 2150 if (UseTLAB) { 2151 result = (oop) THREAD->tlab().allocate(obj_size); 2152 } 2153 // Disable non-TLAB-based fast-path, because profiling requires that all 2154 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2155 // returns NULL. 2156 #ifndef CC_INTERP_PROFILE 2157 if (result == NULL) { 2158 need_zero = true; 2159 // Try allocate in shared eden 2160 retry: 2161 HeapWord* compare_to = *Universe::heap()->top_addr(); 2162 HeapWord* new_top = compare_to + obj_size; 2163 if (new_top <= *Universe::heap()->end_addr()) { 2164 if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2165 goto retry; 2166 } 2167 result = (oop) compare_to; 2168 } 2169 } 2170 #endif 2171 if (result != NULL) { 2172 // Initialize object (if nonzero size and need) and then the header 2173 if (need_zero ) { 2174 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2175 obj_size -= sizeof(oopDesc) / oopSize; 2176 if (obj_size > 0 ) { 2177 memset(to_zero, 0, obj_size * HeapWordSize); 2178 } 2179 } 2180 if (UseBiasedLocking) { 2181 result->set_mark(ik->prototype_header()); 2182 } else { 2183 result->set_mark(markOopDesc::prototype()); 2184 } 2185 result->set_klass_gap(0); 2186 result->set_klass(ik); 2187 // Must prevent reordering of stores for object initialization 2188 // with stores that publish the new object. 2189 OrderAccess::storestore(); 2190 SET_STACK_OBJECT(result, 0); 2191 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2192 } 2193 } 2194 } 2195 // Slow case allocation 2196 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2197 handle_exception); 2198 // Must prevent reordering of stores for object initialization 2199 // with stores that publish the new object. 2200 OrderAccess::storestore(); 2201 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2202 THREAD->set_vm_result(NULL); 2203 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2204 } 2205 CASE(_anewarray): { 2206 u2 index = Bytes::get_Java_u2(pc+1); 2207 jint size = STACK_INT(-1); 2208 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2209 handle_exception); 2210 // Must prevent reordering of stores for object initialization 2211 // with stores that publish the new object. 2212 OrderAccess::storestore(); 2213 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2214 THREAD->set_vm_result(NULL); 2215 UPDATE_PC_AND_CONTINUE(3); 2216 } 2217 CASE(_multianewarray): { 2218 jint dims = *(pc+3); 2219 jint size = STACK_INT(-1); 2220 // stack grows down, dimensions are up! 
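// Illustrative example (not executed here): "new int[2][3]" compiles to
// multianewarray with dims == 2; the dims element counts occupy the top dims
// expression-stack slots, dimarray hands the runtime a contiguous jint view of
// those slots, and the allocated array comes back in THREAD->vm_result().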
2221 jint *dimarray = 2222 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2223 Interpreter::stackElementWords-1]; 2224 //adjust pointer to start of stack element 2225 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2226 handle_exception); 2227 // Must prevent reordering of stores for object initialization 2228 // with stores that publish the new object. 2229 OrderAccess::storestore(); 2230 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2231 THREAD->set_vm_result(NULL); 2232 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2233 } 2234 CASE(_checkcast): 2235 if (STACK_OBJECT(-1) != NULL) { 2236 VERIFY_OOP(STACK_OBJECT(-1)); 2237 u2 index = Bytes::get_Java_u2(pc+1); 2238 // Constant pool may have actual klass or unresolved klass. If it is 2239 // unresolved we must resolve it. 2240 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2241 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2242 } 2243 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2244 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2245 // 2246 // Check for compatibilty. This check must not GC!! 2247 // Seems way more expensive now that we must dispatch. 2248 // 2249 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2250 // Decrement counter at checkcast. 2251 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2252 ResourceMark rm(THREAD); 2253 char* message = SharedRuntime::generate_class_cast_message( 2254 objKlass, klassOf); 2255 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2256 } 2257 // Profile checkcast with null_seen and receiver. 2258 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2259 } else { 2260 // Profile checkcast with null_seen and receiver. 2261 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2262 } 2263 UPDATE_PC_AND_CONTINUE(3); 2264 2265 CASE(_instanceof): 2266 if (STACK_OBJECT(-1) == NULL) { 2267 SET_STACK_INT(0, -1); 2268 // Profile instanceof with null_seen and receiver. 2269 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2270 } else { 2271 VERIFY_OOP(STACK_OBJECT(-1)); 2272 u2 index = Bytes::get_Java_u2(pc+1); 2273 // Constant pool may have actual klass or unresolved klass. If it is 2274 // unresolved we must resolve it. 2275 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2276 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2277 } 2278 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2279 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2280 // 2281 // Check for compatibilty. This check must not GC!! 2282 // Seems way more expensive now that we must dispatch. 2283 // 2284 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2285 SET_STACK_INT(1, -1); 2286 } else { 2287 SET_STACK_INT(0, -1); 2288 // Decrement counter at checkcast. 2289 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2290 } 2291 // Profile instanceof with null_seen and receiver. 
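// Unlike checkcast, a failed instanceof simply pushes 0 rather than raising
// ClassCastException; the receiver klass is still recorded in the profile,
// along with the subtype-check failure noted above.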
2292 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2293 } 2294 UPDATE_PC_AND_CONTINUE(3); 2295 2296 CASE(_ldc_w): 2297 CASE(_ldc): 2298 { 2299 u2 index; 2300 bool wide = false; 2301 int incr = 2; // frequent case 2302 if (opcode == Bytecodes::_ldc) { 2303 index = pc[1]; 2304 } else { 2305 index = Bytes::get_Java_u2(pc+1); 2306 incr = 3; 2307 wide = true; 2308 } 2309 2310 ConstantPool* constants = METHOD->constants(); 2311 switch (constants->tag_at(index).value()) { 2312 case JVM_CONSTANT_Integer: 2313 SET_STACK_INT(constants->int_at(index), 0); 2314 break; 2315 2316 case JVM_CONSTANT_Float: 2317 SET_STACK_FLOAT(constants->float_at(index), 0); 2318 break; 2319 2320 case JVM_CONSTANT_String: 2321 { 2322 oop result = constants->resolved_references()->obj_at(index); 2323 if (result == NULL) { 2324 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2325 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2326 THREAD->set_vm_result(NULL); 2327 } else { 2328 VERIFY_OOP(result); 2329 SET_STACK_OBJECT(result, 0); 2330 } 2331 break; 2332 } 2333 2334 case JVM_CONSTANT_Class: 2335 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2336 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2337 break; 2338 2339 case JVM_CONSTANT_UnresolvedClass: 2340 case JVM_CONSTANT_UnresolvedClassInError: 2341 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2342 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2343 THREAD->set_vm_result(NULL); 2344 break; 2345 2346 case JVM_CONSTANT_Dynamic: 2347 { 2348 oop result = constants->resolved_references()->obj_at(index); 2349 if (result == NULL) { 2350 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2351 result = THREAD->vm_result(); 2352 } 2353 VERIFY_OOP(result); 2354 2355 jvalue value; 2356 BasicType type = java_lang_boxing_object::get_value(result, &value); 2357 switch (type) { 2358 case T_FLOAT: SET_STACK_FLOAT(value.f, 0); break; 2359 case T_INT: SET_STACK_INT(value.i, 0); break; 2360 case T_SHORT: SET_STACK_INT(value.s, 0); break; 2361 case T_BYTE: SET_STACK_INT(value.b, 0); break; 2362 case T_CHAR: SET_STACK_INT(value.c, 0); break; 2363 case T_BOOLEAN: SET_STACK_INT(value.z, 0); break; 2364 default: ShouldNotReachHere(); 2365 } 2366 2367 break; 2368 } 2369 2370 default: ShouldNotReachHere(); 2371 } 2372 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2373 } 2374 2375 CASE(_ldc2_w): 2376 { 2377 u2 index = Bytes::get_Java_u2(pc+1); 2378 2379 ConstantPool* constants = METHOD->constants(); 2380 switch (constants->tag_at(index).value()) { 2381 2382 case JVM_CONSTANT_Long: 2383 SET_STACK_LONG(constants->long_at(index), 1); 2384 break; 2385 2386 case JVM_CONSTANT_Double: 2387 SET_STACK_DOUBLE(constants->double_at(index), 1); 2388 break; 2389 2390 case JVM_CONSTANT_Dynamic: 2391 { 2392 oop result = constants->resolved_references()->obj_at(index); 2393 if (result == NULL) { 2394 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2395 result = THREAD->vm_result(); 2396 } 2397 VERIFY_OOP(result); 2398 2399 jvalue value; 2400 BasicType type = java_lang_boxing_object::get_value(result, &value); 2401 switch (type) { 2402 case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break; 2403 case T_LONG: SET_STACK_LONG(value.j, 1); break; 2404 default: ShouldNotReachHere(); 2405 } 2406 2407 break; 2408 } 2409 2410 default: ShouldNotReachHere(); 2411 } 2412 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2413 } 2414 2415 
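// Note on the JVM_CONSTANT_Dynamic paths above (_ldc/_ldc_w and _ldc2_w): the
// resolved primitive constant comes back as a boxed object, so
// java_lang_boxing_object::get_value unboxes it before the value is pushed as
// ordinary one-slot (int/float) or two-slot (long/double) stack entries.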
CASE(_fast_aldc_w): 2416 CASE(_fast_aldc): { 2417 u2 index; 2418 int incr; 2419 if (opcode == Bytecodes::_fast_aldc) { 2420 index = pc[1]; 2421 incr = 2; 2422 } else { 2423 index = Bytes::get_native_u2(pc+1); 2424 incr = 3; 2425 } 2426 2427 // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.) 2428 // This kind of CP cache entry does not need to match the flags byte, because 2429 // there is a 1-1 relation between bytecode type and CP entry type. 2430 ConstantPool* constants = METHOD->constants(); 2431 oop result = constants->resolved_references()->obj_at(index); 2432 if (result == NULL) { 2433 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2434 handle_exception); 2435 result = THREAD->vm_result(); 2436 } 2437 if (result == Universe::the_null_sentinel()) 2438 result = NULL; 2439 2440 VERIFY_OOP(result); 2441 SET_STACK_OBJECT(result, 0); 2442 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2443 } 2444 2445 CASE(_invokedynamic): { 2446 2447 u4 index = Bytes::get_native_u4(pc+1); 2448 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2449 2450 // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.) 2451 // This kind of CP cache entry does not need to match the flags byte, because 2452 // there is a 1-1 relation between bytecode type and CP entry type. 2453 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2454 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2455 handle_exception); 2456 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2457 } 2458 2459 Method* method = cache->f1_as_method(); 2460 if (VerifyOops) method->verify(); 2461 2462 if (cache->has_appendix()) { 2463 ConstantPool* constants = METHOD->constants(); 2464 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2465 MORE_STACK(1); 2466 } 2467 2468 istate->set_msg(call_method); 2469 istate->set_callee(method); 2470 istate->set_callee_entry_point(method->from_interpreted_entry()); 2471 istate->set_bcp_advance(5); 2472 2473 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2474 BI_PROFILE_UPDATE_CALL(); 2475 2476 UPDATE_PC_AND_RETURN(0); // I'll be back... 2477 } 2478 2479 CASE(_invokehandle): { 2480 2481 u2 index = Bytes::get_native_u2(pc+1); 2482 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2483 2484 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2485 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2486 handle_exception); 2487 cache = cp->entry_at(index); 2488 } 2489 2490 Method* method = cache->f1_as_method(); 2491 if (VerifyOops) method->verify(); 2492 2493 if (cache->has_appendix()) { 2494 ConstantPool* constants = METHOD->constants(); 2495 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2496 MORE_STACK(1); 2497 } 2498 2499 istate->set_msg(call_method); 2500 istate->set_callee(method); 2501 istate->set_callee_entry_point(method->from_interpreted_entry()); 2502 istate->set_bcp_advance(3); 2503 2504 // Invokehandle has got a call counter, just like a final call -> increment! 2505 BI_PROFILE_UPDATE_FINALCALL(); 2506 2507 UPDATE_PC_AND_RETURN(0); // I'll be back... 2508 } 2509 2510 CASE(_invokeinterface): { 2511 u2 index = Bytes::get_native_u2(pc+1); 2512 2513 // QQQ Need to make this as inlined as possible. 
Probably need to split all the bytecode cases 2514 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2515 2516 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2517 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2518 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2519 handle_exception); 2520 cache = cp->entry_at(index); 2521 } 2522 2523 istate->set_msg(call_method); 2524 2525 // Special case of invokeinterface called for virtual method of 2526 // java.lang.Object. See cpCacheOop.cpp for details. 2527 // This code isn't produced by javac, but could be produced by 2528 // another compliant java compiler. 2529 if (cache->is_forced_virtual()) { 2530 Method* callee; 2531 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2532 if (cache->is_vfinal()) { 2533 callee = cache->f2_as_vfinal_method(); 2534 // Profile 'special case of invokeinterface' final call. 2535 BI_PROFILE_UPDATE_FINALCALL(); 2536 } else { 2537 // Get receiver. 2538 int parms = cache->parameter_size(); 2539 // Same comments as invokevirtual apply here. 2540 oop rcvr = STACK_OBJECT(-parms); 2541 VERIFY_OOP(rcvr); 2542 Klass* rcvrKlass = rcvr->klass(); 2543 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2544 // Profile 'special case of invokeinterface' virtual call. 2545 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2546 } 2547 istate->set_callee(callee); 2548 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2549 #ifdef VM_JVMTI 2550 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2551 istate->set_callee_entry_point(callee->interpreter_entry()); 2552 } 2553 #endif /* VM_JVMTI */ 2554 istate->set_bcp_advance(5); 2555 UPDATE_PC_AND_RETURN(0); // I'll be back... 2556 } 2557 2558 // this could definitely be cleaned up QQQ 2559 Method* callee; 2560 Method *interface_method = cache->f2_as_interface_method(); 2561 InstanceKlass* iclass = interface_method->method_holder(); 2562 2563 // get receiver 2564 int parms = cache->parameter_size(); 2565 oop rcvr = STACK_OBJECT(-parms); 2566 CHECK_NULL(rcvr); 2567 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2568 2569 // Receiver subtype check against resolved interface klass (REFC). 2570 { 2571 Klass* refc = cache->f1_as_klass(); 2572 itableOffsetEntry* scan; 2573 for (scan = (itableOffsetEntry*) int2->start_of_itable(); 2574 scan->interface_klass() != NULL; 2575 scan++) { 2576 if (scan->interface_klass() == refc) { 2577 break; 2578 } 2579 } 2580 // Check that the entry is non-null. A null entry means 2581 // that the receiver class doesn't implement the 2582 // interface, and wasn't the same as when the caller was 2583 // compiled. 2584 if (scan->interface_klass() == NULL) { 2585 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2586 } 2587 } 2588 2589 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2590 int i; 2591 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2592 if (ki->interface_klass() == iclass) break; 2593 } 2594 // If the interface isn't found, this class doesn't implement this 2595 // interface. The link resolver checks this but only for the first 2596 // time this interface is called. 
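// The resolver's check covers only the first resolution of this call site, so
// every dispatch re-verifies that the receiver's itable actually lists iclass;
// a miss throws IncompatibleClassChangeError rather than indexing a
// nonexistent itable entry.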
2597 if (i == int2->itable_length()) { 2598 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(THREAD, rcvr->klass(), iclass), 2599 handle_exception); 2600 } 2601 int mindex = interface_method->itable_index(); 2602 2603 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2604 callee = im[mindex].method(); 2605 if (callee == NULL) { 2606 CALL_VM(InterpreterRuntime::throw_AbstractMethodErrorVerbose(THREAD, rcvr->klass(), interface_method), 2607 handle_exception); 2608 } 2609 2610 // Profile virtual call. 2611 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2612 2613 istate->set_callee(callee); 2614 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2615 #ifdef VM_JVMTI 2616 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2617 istate->set_callee_entry_point(callee->interpreter_entry()); 2618 } 2619 #endif /* VM_JVMTI */ 2620 istate->set_bcp_advance(5); 2621 UPDATE_PC_AND_RETURN(0); // I'll be back... 2622 } 2623 2624 CASE(_invokevirtual): 2625 CASE(_invokespecial): 2626 CASE(_invokestatic): { 2627 u2 index = Bytes::get_native_u2(pc+1); 2628 2629 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2630 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2631 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2632 2633 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2634 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2635 handle_exception); 2636 cache = cp->entry_at(index); 2637 } 2638 2639 istate->set_msg(call_method); 2640 { 2641 Method* callee; 2642 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2643 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2644 if (cache->is_vfinal()) { 2645 callee = cache->f2_as_vfinal_method(); 2646 // Profile final call. 2647 BI_PROFILE_UPDATE_FINALCALL(); 2648 } else { 2649 // get receiver 2650 int parms = cache->parameter_size(); 2651 // this works but needs a resourcemark and seems to create a vtable on every call: 2652 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2653 // 2654 // this fails with an assert 2655 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2656 // but this works 2657 oop rcvr = STACK_OBJECT(-parms); 2658 VERIFY_OOP(rcvr); 2659 Klass* rcvrKlass = rcvr->klass(); 2660 /* 2661 Executing this code in java.lang.String: 2662 public String(char value[]) { 2663 this.count = value.length; 2664 this.value = (char[])value.clone(); 2665 } 2666 2667 a find on rcvr->klass() reports: 2668 {type array char}{type array class} 2669 - klass: {other class} 2670 2671 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2672 because rcvr->klass()->is_instance_klass() == 0 2673 However it seems to have a vtable in the right location. Huh? 2674 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2675 */ 2676 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2677 // Profile virtual call. 2678 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2679 } 2680 } else { 2681 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2682 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2683 } 2684 callee = cache->f1_as_method(); 2685 2686 // Profile call. 
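// (Both invokespecial and invokestatic bind to a fixed Method* from f1, so a
// plain call-count update suffices here; only the virtual dispatch path above
// needs to record the receiver klass.)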
2687 BI_PROFILE_UPDATE_CALL(); 2688 } 2689 2690 istate->set_callee(callee); 2691 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2692 #ifdef VM_JVMTI 2693 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2694 istate->set_callee_entry_point(callee->interpreter_entry()); 2695 } 2696 #endif /* VM_JVMTI */ 2697 istate->set_bcp_advance(3); 2698 UPDATE_PC_AND_RETURN(0); // I'll be back... 2699 } 2700 } 2701 2702 /* Allocate memory for a new java object. */ 2703 2704 CASE(_newarray): { 2705 BasicType atype = (BasicType) *(pc+1); 2706 jint size = STACK_INT(-1); 2707 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2708 handle_exception); 2709 // Must prevent reordering of stores for object initialization 2710 // with stores that publish the new object. 2711 OrderAccess::storestore(); 2712 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2713 THREAD->set_vm_result(NULL); 2714 2715 UPDATE_PC_AND_CONTINUE(2); 2716 } 2717 2718 /* Throw an exception. */ 2719 2720 CASE(_athrow): { 2721 oop except_oop = STACK_OBJECT(-1); 2722 CHECK_NULL(except_oop); 2723 // set pending_exception so we use common code 2724 THREAD->set_pending_exception(except_oop, NULL, 0); 2725 goto handle_exception; 2726 } 2727 2728 /* goto and jsr. They are exactly the same except jsr pushes 2729 * the address of the next instruction first. 2730 */ 2731 2732 CASE(_jsr): { 2733 /* push bytecode index on stack */ 2734 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2735 MORE_STACK(1); 2736 /* FALL THROUGH */ 2737 } 2738 2739 CASE(_goto): 2740 { 2741 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2742 // Profile jump. 2743 BI_PROFILE_UPDATE_JUMP(); 2744 address branch_pc = pc; 2745 UPDATE_PC(offset); 2746 DO_BACKEDGE_CHECKS(offset, branch_pc); 2747 CONTINUE; 2748 } 2749 2750 CASE(_jsr_w): { 2751 /* push return address on the stack */ 2752 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2753 MORE_STACK(1); 2754 /* FALL THROUGH */ 2755 } 2756 2757 CASE(_goto_w): 2758 { 2759 int32_t offset = Bytes::get_Java_u4(pc + 1); 2760 // Profile jump. 2761 BI_PROFILE_UPDATE_JUMP(); 2762 address branch_pc = pc; 2763 UPDATE_PC(offset); 2764 DO_BACKEDGE_CHECKS(offset, branch_pc); 2765 CONTINUE; 2766 } 2767 2768 /* return from a jsr or jsr_w */ 2769 2770 CASE(_ret): { 2771 // Profile ret. 2772 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2773 // Now, update the pc. 2774 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2775 UPDATE_PC_AND_CONTINUE(0); 2776 } 2777 2778 /* debugger breakpoint */ 2779 2780 CASE(_breakpoint): { 2781 Bytecodes::Code original_bytecode; 2782 DECACHE_STATE(); 2783 SET_LAST_JAVA_FRAME(); 2784 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2785 METHOD, pc); 2786 RESET_LAST_JAVA_FRAME(); 2787 CACHE_STATE(); 2788 if (THREAD->has_pending_exception()) goto handle_exception; 2789 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2790 handle_exception); 2791 2792 opcode = (jubyte)original_bytecode; 2793 goto opcode_switch; 2794 } 2795 2796 DEFAULT: 2797 fatal("Unimplemented opcode %d = %s", opcode, 2798 Bytecodes::name((Bytecodes::Code)opcode)); 2799 goto finish; 2800 2801 } /* switch(opc) */ 2802 2803 2804 #ifdef USELABELS 2805 check_for_exception: 2806 #endif 2807 { 2808 if (!THREAD->has_pending_exception()) { 2809 CONTINUE; 2810 } 2811 /* We will be gcsafe soon, so flush our state. 
*/ 2812 DECACHE_PC(); 2813 goto handle_exception; 2814 } 2815 do_continue: ; 2816 2817 } /* while (1) interpreter loop */ 2818 2819 2820 // An exception exists in the thread state see whether this activation can handle it 2821 handle_exception: { 2822 2823 HandleMarkCleaner __hmc(THREAD); 2824 Handle except_oop(THREAD, THREAD->pending_exception()); 2825 // Prevent any subsequent HandleMarkCleaner in the VM 2826 // from freeing the except_oop handle. 2827 HandleMark __hm(THREAD); 2828 2829 THREAD->clear_pending_exception(); 2830 assert(except_oop() != NULL, "No exception to process"); 2831 intptr_t continuation_bci; 2832 // expression stack is emptied 2833 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2834 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2835 handle_exception); 2836 2837 except_oop = Handle(THREAD, THREAD->vm_result()); 2838 THREAD->set_vm_result(NULL); 2839 if (continuation_bci >= 0) { 2840 // Place exception on top of stack 2841 SET_STACK_OBJECT(except_oop(), 0); 2842 MORE_STACK(1); 2843 pc = METHOD->code_base() + continuation_bci; 2844 if (log_is_enabled(Info, exceptions)) { 2845 ResourceMark rm(THREAD); 2846 stringStream tempst; 2847 tempst.print("interpreter method <%s>\n" 2848 " at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2849 METHOD->print_value_string(), 2850 (int)(istate->bcp() - METHOD->code_base()), 2851 (int)continuation_bci, p2i(THREAD)); 2852 Exceptions::log_exception(except_oop, tempst); 2853 } 2854 // for AbortVMOnException flag 2855 Exceptions::debug_check_abort(except_oop); 2856 2857 // Update profiling data. 2858 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2859 goto run; 2860 } 2861 if (log_is_enabled(Info, exceptions)) { 2862 ResourceMark rm; 2863 stringStream tempst; 2864 tempst.print("interpreter method <%s>\n" 2865 " at bci %d, unwinding for thread " INTPTR_FORMAT, 2866 METHOD->print_value_string(), 2867 (int)(istate->bcp() - METHOD->code_base()), 2868 p2i(THREAD)); 2869 Exceptions::log_exception(except_oop, tempst); 2870 } 2871 // for AbortVMOnException flag 2872 Exceptions::debug_check_abort(except_oop); 2873 2874 // No handler in this activation, unwind and try again 2875 THREAD->set_pending_exception(except_oop(), NULL, 0); 2876 goto handle_return; 2877 } // handle_exception: 2878 2879 // Return from an interpreter invocation with the result of the interpretation 2880 // on the top of the Java Stack (or a pending exception) 2881 2882 handle_Pop_Frame: { 2883 2884 // We don't really do anything special here except we must be aware 2885 // that we can get here without ever locking the method (if sync). 2886 // Also we skip the notification of the exit. 2887 2888 istate->set_msg(popping_frame); 2889 // Clear pending so while the pop is in process 2890 // we don't start another one if a call_vm is done. 2891 THREAD->clr_pop_frame_pending(); 2892 // Let interpreter (only) see the we're in the process of popping a frame 2893 THREAD->set_pop_frame_in_process(); 2894 2895 goto handle_return; 2896 2897 } // handle_Pop_Frame 2898 2899 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2900 // given by the invoker of the early return. 2901 handle_Early_Return: { 2902 2903 istate->set_msg(early_return); 2904 2905 // Clear expression stack. 2906 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2907 2908 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2909 2910 // Push the value to be returned. 
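// The value was stashed in the JvmtiThreadState when ForceEarlyReturn was
// requested; sub-int results (boolean, byte, char, short) are widened onto a
// single int slot, matching the normal return convention for those types.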
2911 switch (istate->method()->result_type()) { 2912 case T_BOOLEAN: 2913 case T_SHORT: 2914 case T_BYTE: 2915 case T_CHAR: 2916 case T_INT: 2917 SET_STACK_INT(ts->earlyret_value().i, 0); 2918 MORE_STACK(1); 2919 break; 2920 case T_LONG: 2921 SET_STACK_LONG(ts->earlyret_value().j, 1); 2922 MORE_STACK(2); 2923 break; 2924 case T_FLOAT: 2925 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2926 MORE_STACK(1); 2927 break; 2928 case T_DOUBLE: 2929 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2930 MORE_STACK(2); 2931 break; 2932 case T_ARRAY: 2933 case T_OBJECT: 2934 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2935 MORE_STACK(1); 2936 break; 2937 } 2938 2939 ts->clr_earlyret_value(); 2940 ts->set_earlyret_oop(NULL); 2941 ts->clr_earlyret_pending(); 2942 2943 // Fall through to handle_return. 2944 2945 } // handle_Early_Return 2946 2947 handle_return: { 2948 // A storestore barrier is required to order initialization of 2949 // final fields with publishing the reference to the object that 2950 // holds the field. Without the barrier the value of final fields 2951 // can be observed to change. 2952 OrderAccess::storestore(); 2953 2954 DECACHE_STATE(); 2955 2956 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2957 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2958 Handle original_exception(THREAD, THREAD->pending_exception()); 2959 Handle illegal_state_oop(THREAD, NULL); 2960 2961 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2962 // in any following VM entries from freeing our live handles, but illegal_state_oop 2963 // isn't really allocated yet and so doesn't become live until later and 2964 // in unpredicatable places. Instead we must protect the places where we enter the 2965 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2966 // a NULL oop in it and then overwrite the oop later as needed. This isn't 2967 // unfortunately isn't possible. 2968 2969 THREAD->clear_pending_exception(); 2970 2971 // 2972 // As far as we are concerned we have returned. If we have a pending exception 2973 // that will be returned as this invocation's result. However if we get any 2974 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2975 // will be our final result (i.e. monitor exception trumps a pending exception). 2976 // 2977 2978 // If we never locked the method (or really passed the point where we would have), 2979 // there is no need to unlock it (or look for other monitors), since that 2980 // could not have happened. 2981 2982 if (THREAD->do_not_unlock()) { 2983 2984 // Never locked, reset the flag now because obviously any caller must 2985 // have passed their point of locking for us to have gotten here. 2986 2987 THREAD->clr_do_not_unlock(); 2988 } else { 2989 // At this point we consider that we have returned. We now check that the 2990 // locks were properly block structured. If we find that they were not 2991 // used properly we will return with an illegal monitor exception. 2992 // The exception is checked by the caller not the callee since this 2993 // checking is considered to be part of the invocation and therefore 2994 // in the callers scope (JVM spec 8.13). 2995 // 2996 // Another weird thing to watch for is if the method was locked 2997 // recursively and then not exited properly. This means we must 2998 // examine all the entries in reverse time(and stack) order and 2999 // unlock as we find them. 
If we find the method monitor before 3000 // we are at the initial entry then we should throw an exception. 3001 // It is not clear the template based interpreter does this 3002 // correctly 3003 3004 BasicObjectLock* base = istate->monitor_base(); 3005 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 3006 bool method_unlock_needed = METHOD->is_synchronized(); 3007 // We know the initial monitor was used for the method don't check that 3008 // slot in the loop 3009 if (method_unlock_needed) base--; 3010 3011 // Check all the monitors to see they are unlocked. Install exception if found to be locked. 3012 while (end < base) { 3013 oop lockee = end->obj(); 3014 if (lockee != NULL) { 3015 BasicLock* lock = end->lock(); 3016 markOop header = lock->displaced_header(); 3017 end->set_obj(NULL); 3018 3019 if (!lockee->mark()->has_bias_pattern()) { 3020 // If it isn't recursive we either must swap old header or call the runtime 3021 if (header != NULL) { 3022 markOop old_header = markOopDesc::encode(lock); 3023 if (lockee->cas_set_mark(header, old_header) != old_header) { 3024 // restore object for the slow case 3025 end->set_obj(lockee); 3026 { 3027 // Prevent any HandleMarkCleaner from freeing our live handles 3028 HandleMark __hm(THREAD); 3029 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 3030 } 3031 } 3032 } 3033 } 3034 // One error is plenty 3035 if (illegal_state_oop() == NULL && !suppress_error) { 3036 { 3037 // Prevent any HandleMarkCleaner from freeing our live handles 3038 HandleMark __hm(THREAD); 3039 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3040 } 3041 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3042 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3043 THREAD->clear_pending_exception(); 3044 } 3045 } 3046 end++; 3047 } 3048 // Unlock the method if needed 3049 if (method_unlock_needed) { 3050 if (base->obj() == NULL) { 3051 // The method is already unlocked this is not good. 3052 if (illegal_state_oop() == NULL && !suppress_error) { 3053 { 3054 // Prevent any HandleMarkCleaner from freeing our live handles 3055 HandleMark __hm(THREAD); 3056 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3057 } 3058 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3059 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3060 THREAD->clear_pending_exception(); 3061 } 3062 } else { 3063 // 3064 // The initial monitor is always used for the method 3065 // However if that slot is no longer the oop for the method it was unlocked 3066 // and reused by something that wasn't unlocked! 3067 // 3068 // deopt can come in with rcvr dead because c2 knows 3069 // its value is preserved in the monitor. So we can't use locals[0] at all 3070 // and must use first monitor slot. 3071 // 3072 oop rcvr = base->obj(); 3073 if (rcvr == NULL) { 3074 if (!suppress_error) { 3075 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3076 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3077 THREAD->clear_pending_exception(); 3078 } 3079 } else if (UseHeavyMonitors) { 3080 { 3081 // Prevent any HandleMarkCleaner from freeing our live handles. 
3082 HandleMark __hm(THREAD); 3083 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3084 } 3085 if (THREAD->has_pending_exception()) { 3086 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3087 THREAD->clear_pending_exception(); 3088 } 3089 } else { 3090 BasicLock* lock = base->lock(); 3091 markOop header = lock->displaced_header(); 3092 base->set_obj(NULL); 3093 3094 if (!rcvr->mark()->has_bias_pattern()) { 3095 base->set_obj(NULL); 3096 // If it isn't recursive we either must swap old header or call the runtime 3097 if (header != NULL) { 3098 markOop old_header = markOopDesc::encode(lock); 3099 if (rcvr->cas_set_mark(header, old_header) != old_header) { 3100 // restore object for the slow case 3101 base->set_obj(rcvr); 3102 { 3103 // Prevent any HandleMarkCleaner from freeing our live handles 3104 HandleMark __hm(THREAD); 3105 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3106 } 3107 if (THREAD->has_pending_exception()) { 3108 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3109 THREAD->clear_pending_exception(); 3110 } 3111 } 3112 } 3113 } 3114 } 3115 } 3116 } 3117 } 3118 // Clear the do_not_unlock flag now. 3119 THREAD->clr_do_not_unlock(); 3120 3121 // 3122 // Notify jvmti/jvmdi 3123 // 3124 // NOTE: we do not notify a method_exit if we have a pending exception, 3125 // including an exception we generate for unlocking checks. In the former 3126 // case, JVMDI has already been notified by our call for the exception handler 3127 // and in both cases as far as JVMDI is concerned we have already returned. 3128 // If we notify it again JVMDI will be all confused about how many frames 3129 // are still on the stack (4340444). 3130 // 3131 // NOTE Further! It turns out the the JVMTI spec in fact expects to see 3132 // method_exit events whenever we leave an activation unless it was done 3133 // for popframe. This is nothing like jvmdi. However we are passing the 3134 // tests at the moment (apparently because they are jvmdi based) so rather 3135 // than change this code and possibly fail tests we will leave it alone 3136 // (with this note) in anticipation of changing the vm and the tests 3137 // simultaneously. 3138 3139 3140 // 3141 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3142 3143 3144 3145 #ifdef VM_JVMTI 3146 if (_jvmti_interp_events) { 3147 // Whenever JVMTI puts a thread in interp_only_mode, method 3148 // entry/exit events are sent for that thread to track stack depth. 3149 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3150 { 3151 // Prevent any HandleMarkCleaner from freeing our live handles 3152 HandleMark __hm(THREAD); 3153 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3154 } 3155 } 3156 } 3157 #endif /* VM_JVMTI */ 3158 3159 // 3160 // See if we are returning any exception 3161 // A pending exception that was pending prior to a possible popping frame 3162 // overrides the popping frame. 3163 // 3164 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3165 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3166 // Inform the frame manager we have no result. 
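// Whatever exception is installed below is what the caller observes; an
// IllegalMonitorStateException produced by the unlock checks above deliberately
// takes precedence over an exception that was already pending at the return.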
3167 istate->set_msg(throwing_exception); 3168 if (illegal_state_oop() != NULL) 3169 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3170 else 3171 THREAD->set_pending_exception(original_exception(), NULL, 0); 3172 UPDATE_PC_AND_RETURN(0); 3173 } 3174 3175 if (istate->msg() == popping_frame) { 3176 // Make it simpler on the assembly code and set the message for the frame pop. 3177 // returns 3178 if (istate->prev() == NULL) { 3179 // We must be returning to a deoptimized frame (because popframe only happens between 3180 // two interpreted frames). We need to save the current arguments in C heap so that 3181 // the deoptimized frame when it restarts can copy the arguments to its expression 3182 // stack and re-execute the call. We also have to notify deoptimization that this 3183 // has occurred and to pick the preserved args copy them to the deoptimized frame's 3184 // java expression stack. Yuck. 3185 // 3186 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3187 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3188 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3189 } 3190 } else { 3191 istate->set_msg(return_from_method); 3192 } 3193 3194 // Normal return 3195 // Advance the pc and return to frame manager 3196 UPDATE_PC_AND_RETURN(1); 3197 } /* handle_return: */ 3198 3199 // This is really a fatal error return 3200 3201 finish: 3202 DECACHE_TOS(); 3203 DECACHE_PC(); 3204 3205 return; 3206 } 3207 3208 /* 3209 * All the code following this point is only produced once and is not present 3210 * in the JVMTI version of the interpreter 3211 */ 3212 3213 #ifndef VM_JVMTI 3214 3215 // This constructor should only be used to contruct the object to signal 3216 // interpreter initialization. All other instances should be created by 3217 // the frame manager. 3218 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3219 if (msg != initialize) ShouldNotReachHere(); 3220 _msg = msg; 3221 _self_link = this; 3222 _prev_link = NULL; 3223 } 3224 3225 // Inline static functions for Java Stack and Local manipulation 3226 3227 // The implementations are platform dependent. We have to worry about alignment 3228 // issues on some machines which can change on the same platform depending on 3229 // whether it is an LP64 machine also. 
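// Usage sketch (hedged: assumes the STACK_* macros in the dispatch loop forward
// their offset argument to these helpers unchanged, with offset -1 denoting the
// top-of-stack slot):
//   jint v = BytecodeInterpreter::stack_int(topOfStack, -1);      // read TOS
//   BytecodeInterpreter::set_stack_int(topOfStack, v + 1, -1);    // overwrite it
// All slot addressing funnels through Interpreter::expr_index_at /
// local_index_at, which hide the platform's stack direction and 64-bit slot
// alignment from the code above.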
3230 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3231 return (address) tos[Interpreter::expr_index_at(-offset)]; 3232 } 3233 3234 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3235 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3236 } 3237 3238 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3239 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3240 } 3241 3242 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3243 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3244 } 3245 3246 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3247 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3248 } 3249 3250 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3251 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3252 } 3253 3254 // only used for value types 3255 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3256 int offset) { 3257 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3258 } 3259 3260 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3261 int offset) { 3262 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3263 } 3264 3265 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3266 int offset) { 3267 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3268 } 3269 3270 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3271 int offset) { 3272 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3273 } 3274 3275 // needs to be platform dep for the 32 bit platforms. 3276 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3277 int offset) { 3278 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3279 } 3280 3281 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3282 address addr, int offset) { 3283 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3284 ((VMJavaVal64*)addr)->d); 3285 } 3286 3287 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3288 int offset) { 3289 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3290 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3291 } 3292 3293 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3294 address addr, int offset) { 3295 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3296 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3297 ((VMJavaVal64*)addr)->l; 3298 } 3299 3300 // Locals 3301 3302 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3303 return (address)locals[Interpreter::local_index_at(-offset)]; 3304 } 3305 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3306 return (jint)locals[Interpreter::local_index_at(-offset)]; 3307 } 3308 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3309 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3310 } 3311 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3312 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3313 } 3314 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3315 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3316 } 3317 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3318 return 
// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  return (jfloat)locals[Interpreter::local_index_at(-offset)];
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}

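// The dup* helpers above (and dup2_x2 just below) are pure slot shuffles expressed
// with copy_stack_slot, indexing relative to tos through Interpreter::expr_index_at.
// Stand-alone model of dup_x1 on a grow-upward array stack (illustrative sketch only,
// never compiled; the function and variable names are hypothetical):
#if 0
#include <stdint.h>

static void dup_x1_sketch(intptr_t* stack, int* sp) {
  // Before: ... value2 value1      After: ... value1 value2 value1
  intptr_t value1 = stack[*sp - 1];
  intptr_t value2 = stack[*sp - 2];
  stack[*sp - 2] = value1;        // duplicated top word goes two down
  stack[*sp - 1] = value2;
  stack[*sp]     = value1;
  *sp += 1;                       // the stack grew by one slot
}
#endif
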
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
    case BytecodeInterpreter::no_request:  return("no_request");
    case BytecodeInterpreter::initialize:  return("initialize");
    // status message to C++ interpreter
    case BytecodeInterpreter::method_entry:  return("method_entry");
    case BytecodeInterpreter::method_resume:  return("method_resume");
    case BytecodeInterpreter::got_monitors:  return("got_monitors");
    case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
    // requests to frame manager from C++ interpreter
    case BytecodeInterpreter::call_method:  return("call_method");
    case BytecodeInterpreter::return_from_method:  return("return_from_method");
    case BytecodeInterpreter::more_monitors:  return("more_monitors");
    case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
    case BytecodeInterpreter::popping_frame:  return("popping_frame");
    case BytecodeInterpreter::do_osr:  return("do_osr");
    // deopt
    case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
    case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
    default: return("BAD MSG");
  }
}

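// C_msg() above maps the frame-manager/interpreter message enum to a printable name;
// print() below uses it when dumping an interpreter state. A hedged usage sketch
// (illustrative only, never compiled; the local names are hypothetical):
#if 0
static void print_interpreter_msg(BytecodeInterpreter* interp) {
  tty->print_cr("interpreter message: %s", interp->C_msg(interp->msg()));
}
#endif
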
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Entry point callable from a native debugger to dump an interpreter state,
// e.g. "call PI((uintptr_t)istate)".
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP