/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
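// Illustrative sketch only (pseudo-code, not compiled): with USELABELS
// defined, each CASE(_xxx) handler below becomes a GCC label (opc_xxx) and
// dispatch is a computed goto through dispatch_table, roughly:
//
//   opc_iadd:                              // CASE(_iadd)
//     ...handler body...
//     goto *(void*)dispatch_table[*pc];    // DISPATCH()/CONTINUE, defined below
//
// Without USELABELS the same handlers compile as ordinary
// "case Bytecodes::_xxx:" branches of the switch (opcode) statement in the
// main while loop.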
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no live
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                                 \
    {                                                                             \
       /* zap freed handles rather than GC'ing them */                            \
       HandleMarkCleaner __hmc(THREAD);                                           \
       CALL_VM(SafepointMechanism::block_if_requested(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                     \
    DECACHE_STATE();                                                      \
    SET_LAST_JAVA_FRAME();                                                \
    {                                                                     \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());  \
       ThreadInVMfromJava trans(THREAD);                                  \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);     \
    }                                                                     \
    RESET_LAST_JAVA_FRAME();                                              \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap)                             \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                         \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0,                                    \
                                        topOfStack[Interpreter::expr_index_at(1)],                   \
                                        topOfStack[Interpreter::expr_index_at(2)]),                  \
                                        handle_exception);                                           \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result if we call at_single_stepping_point() we refetch opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                     \
{                                                                         \
      if (_jvmti_interp_events) {                                         \
        if (JvmtiExport::should_post_single_step()) {                     \
          DECACHE_STATE();                                                \
          SET_LAST_JAVA_FRAME();                                          \
          ThreadInVMfromJava trans(THREAD);                               \
          JvmtiExport::at_single_stepping_point(THREAD,                   \
                                                istate->method(),         \
                                                pc);                      \
          RESET_LAST_JAVA_FRAME();                                        \
          CACHE_STATE();                                                  \
          if (THREAD->pop_frame_pending() &&                              \
              !THREAD->pop_frame_in_process()) {                          \
            goto handle_Pop_Frame;                                        \
          }                                                               \
          if (THREAD->jvmti_thread_state() &&                             \
              THREAD->jvmti_thread_state()->is_earlyret_pending()) {      \
            goto handle_Early_Return;                                     \
          }                                                               \
          opcode = *pc;                                                   \
        }                                                                 \
      }                                                                   \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
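// Note: every excursion from the interpreter into the VM in the macros above
// and below follows the same bracket:
//
//   DECACHE_STATE();          // flush cached pc/tos back into istate
//   SET_LAST_JAVA_FRAME();    // make this frame walkable for GC/stack walks
//   ... ThreadInVMfromJava transition and the actual VM work ...
//   RESET_LAST_JAVA_FRAME();
//   CACHE_STATE();            // reload state the VM or a GC may have changed
//
// CALL_VM()/CALL_VM_NOCHECK() (defined further down) wrap the same pattern and
// additionally check for PopFrame requests, early returns and pending exceptions.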
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager.
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                                                \
  res = METHOD->method_counters();                                                              \
  if (res == NULL) {                                                                            \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */
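// Rough outline of DO_BACKEDGE_CHECKS below (illustrative pseudo-code only):
//
//   if (skip <= 0) {                    // the branch target is at or before this bcp
//     increment the backedge counter;
//     if (UseOnStackReplacement && the counters say the loop is hot) {
//       OSR_REQUEST(osr_nmethod, branch_pc);   // ask for / look up an OSR nmethod
//       if (osr_nmethod != NULL && osr_nmethod->is_in_use()) {
//         SharedRuntime::OSR_migration_begin(), set msg to do_osr and return
//         to the frame manager, which resumes the loop in compiled code;
//       }
//     }
//     SAFEPOINT;                        // every backward branch is a GC point
//   }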
// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                         \
    if ((skip) <= 0) {                                                              \
      MethodCounters* mcs;                                                          \
      GET_METHOD_COUNTERS(mcs);                                                     \
      if (UseLoopCounter) {                                                         \
        bool do_OSR = UseOnStackReplacement;                                        \
        mcs->backedge_counter()->increment();                                       \
        if (ProfileInterpreter) {                                                   \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                   \
          /* Check for overflow against MDO count. */                               \
          do_OSR = do_OSR                                                           \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes     */       \
            /* from the methodDataOop, whose value does not get reset on   */       \
            /* the call to frequency_counter_overflow(). To avoid          */       \
            /* excessive calls to the overflow routine while the method is */       \
            /* being compiled, add a second test to make sure the overflow */       \
            /* function is called only once every overflow_frequency.      */       \
            && (!(mdo_last_branch_taken_count & 1023));                             \
        } else {                                                                    \
          /* check for overflow of backedge counter */                              \
          do_OSR = do_OSR                                                           \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        }                                                                           \
        if (do_OSR) {                                                               \
          nmethod* osr_nmethod;                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                      \
          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) {                    \
            intptr_t* buf;                                                          \
            /* Call OSR migration with last java frame only, no checks. */          \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));      \
            istate->set_msg(do_osr);                                                \
            istate->set_osr_buf((address)buf);                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                        \
            return;                                                                 \
          }                                                                         \
        }                                                                           \
      }  /* UseCompiler ... */                                                      \
      SAFEPOINT;                                                                    \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything;
 * some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
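// Quick reference: the cached locals in BytecodeInterpreter::run() mirror
// fields of the interpreterState as follows:
//
//   pc         <-> istate->bcp()        (CACHE_PC / DECACHE_PC)
//   topOfStack <-> istate->stack()      (CACHE_TOS / DECACHE_TOS)
//   cp         <-> istate->constants()  (CACHE_CP)
//   locals     <-> istate->locals()     (CACHE_LOCALS)
//
// DECACHE_STATE()/CACHE_STATE() (defined below) bracket anything that may GC
// or walk the stack, so that only istate has to be current at those points.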
/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                                         \
        if ((obj_) == NULL) {                                                                    \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        }                                                                                        \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func)                                      \
        CALL_VM_NAKED_LJF(func)                                    \
        if (THREAD->pop_frame_pending() &&                         \
            !THREAD->pop_frame_in_process()) {                     \
          goto handle_Pop_Frame;                                   \
        }                                                          \
        if (THREAD->jvmti_thread_state() &&                        \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return;                                \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                     \
          CALL_VM_NOCHECK(func);                                   \
          if (THREAD->has_pending_exception()) goto label;         \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks variant is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
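  // Note: the copy taken just below is compared against istate on every pass
  // through the dispatch loop (assert(istate == orig, "Corrupted istate")),
  // which catches expression-stack overruns that clobber the interpreter state.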
489 interpreterState orig = istate; 490 #endif 491 492 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ 493 register address pc = istate->bcp(); 494 register jubyte opcode; 495 register intptr_t* locals = istate->locals(); 496 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() 497 #ifdef LOTS_OF_REGS 498 register JavaThread* THREAD = istate->thread(); 499 #else 500 #undef THREAD 501 #define THREAD istate->thread() 502 #endif 503 504 #ifdef USELABELS 505 const static void* const opclabels_data[256] = { 506 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, 507 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, 508 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, 509 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, 510 511 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, 512 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, 513 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, 514 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, 515 516 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, 517 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, 518 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, 519 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, 520 521 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, 522 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, 523 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, 524 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, 525 526 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, 527 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, 528 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, 529 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, 530 531 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, 532 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, 533 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, 534 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, 535 536 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, 537 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, 538 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, 539 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, 540 541 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, 542 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, 543 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, 544 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, 545 546 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, 547 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, 548 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, 549 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, 550 551 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, 552 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, 553 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, 554 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, 555 556 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, 557 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, 558 /* 0xA8 */ &&opc_jsr, &&opc_ret, 
&&opc_tableswitch,&&opc_lookupswitch, 559 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, 560 561 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, 562 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, 563 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, 564 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, 565 566 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, 567 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, 568 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, 569 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 570 571 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 572 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 573 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 574 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 575 576 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 577 /* 0xE4 */ &&opc_default, &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, 578 /* 0xE8 */ &&opc_return_register_finalizer, 579 &&opc_invokehandle, &&opc_default, &&opc_default, 580 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 581 582 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 583 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 584 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 585 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default 586 }; 587 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; 588 #endif /* USELABELS */ 589 590 #ifdef ASSERT 591 // this will trigger a VERIFY_OOP on entry 592 if (istate->msg() != initialize && ! METHOD->is_static()) { 593 oop rcvr = LOCALS_OBJECT(0); 594 VERIFY_OOP(rcvr); 595 } 596 #endif 597 // #define HACK 598 #ifdef HACK 599 bool interesting = false; 600 #endif // HACK 601 602 /* QQQ this should be a stack method so we don't know actual direction */ 603 guarantee(istate->msg() == initialize || 604 topOfStack >= istate->stack_limit() && 605 topOfStack < istate->stack_base(), 606 "Stack top out of range"); 607 608 #ifdef CC_INTERP_PROFILE 609 // MethodData's last branch taken count. 610 uint mdo_last_branch_taken_count = 0; 611 #else 612 const uint mdo_last_branch_taken_count = 0; 613 #endif 614 615 switch (istate->msg()) { 616 case initialize: { 617 if (initialized++) ShouldNotReachHere(); // Only one initialize call. 618 _compiling = (UseCompiler || CountCompiledCalls); 619 #ifdef VM_JVMTI 620 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 621 #endif 622 return; 623 } 624 break; 625 case method_entry: { 626 THREAD->set_do_not_unlock(); 627 // count invocations 628 assert(initialized, "Interpreter not initialized"); 629 if (_compiling) { 630 MethodCounters* mcs; 631 GET_METHOD_COUNTERS(mcs); 632 #if COMPILER2_OR_JVMCI 633 if (ProfileInterpreter) { 634 METHOD->increment_interpreter_invocation_count(THREAD); 635 } 636 #endif 637 mcs->invocation_counter()->increment(); 638 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { 639 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 640 // We no longer retry on a counter overflow. 641 } 642 // Get or create profile data. 
Check for pending (async) exceptions. 643 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 644 SAFEPOINT; 645 } 646 647 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 648 // initialize 649 os::breakpoint(); 650 } 651 652 #ifdef HACK 653 { 654 ResourceMark rm; 655 char *method_name = istate->method()->name_and_sig_as_C_string(); 656 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 657 tty->print_cr("entering: depth %d bci: %d", 658 (istate->_stack_base - istate->_stack), 659 istate->_bcp - istate->_method->code_base()); 660 interesting = true; 661 } 662 } 663 #endif // HACK 664 665 // Lock method if synchronized. 666 if (METHOD->is_synchronized()) { 667 // oop rcvr = locals[0].j.r; 668 oop rcvr; 669 if (METHOD->is_static()) { 670 rcvr = METHOD->constants()->pool_holder()->java_mirror(); 671 } else { 672 rcvr = LOCALS_OBJECT(0); 673 VERIFY_OOP(rcvr); 674 } 675 // The initial monitor is ours for the taking. 676 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. 677 BasicObjectLock* mon = &istate->monitor_base()[-1]; 678 mon->set_obj(rcvr); 679 bool success = false; 680 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 681 markOop mark = rcvr->mark(); 682 intptr_t hash = (intptr_t) markOopDesc::no_hash; 683 // Implies UseBiasedLocking. 684 if (mark->has_bias_pattern()) { 685 uintptr_t thread_ident; 686 uintptr_t anticipated_bias_locking_value; 687 thread_ident = (uintptr_t)istate->thread(); 688 anticipated_bias_locking_value = 689 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 690 ~((uintptr_t) markOopDesc::age_mask_in_place); 691 692 if (anticipated_bias_locking_value == 0) { 693 // Already biased towards this thread, nothing to do. 694 if (PrintBiasedLockingStatistics) { 695 (* BiasedLocking::biased_lock_entry_count_addr())++; 696 } 697 success = true; 698 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 699 // Try to revoke bias. 700 markOop header = rcvr->klass()->prototype_header(); 701 if (hash != markOopDesc::no_hash) { 702 header = header->copy_set_hash(hash); 703 } 704 if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) { 705 if (PrintBiasedLockingStatistics) 706 (*BiasedLocking::revoked_lock_entry_count_addr())++; 707 } 708 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { 709 // Try to rebias. 710 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); 711 if (hash != markOopDesc::no_hash) { 712 new_header = new_header->copy_set_hash(hash); 713 } 714 if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) { 715 if (PrintBiasedLockingStatistics) { 716 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 717 } 718 } else { 719 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 720 } 721 success = true; 722 } else { 723 // Try to bias towards thread in case object is anonymously biased. 724 markOop header = (markOop) ((uintptr_t) mark & 725 ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 726 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 727 if (hash != markOopDesc::no_hash) { 728 header = header->copy_set_hash(hash); 729 } 730 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 731 // Debugging hint. 
732 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 733 if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) { 734 if (PrintBiasedLockingStatistics) { 735 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 736 } 737 } else { 738 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 739 } 740 success = true; 741 } 742 } 743 744 // Traditional lightweight locking. 745 if (!success) { 746 markOop displaced = rcvr->mark()->set_unlocked(); 747 mon->lock()->set_displaced_header(displaced); 748 bool call_vm = UseHeavyMonitors; 749 if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) { 750 // Is it simple recursive case? 751 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 752 mon->lock()->set_displaced_header(NULL); 753 } else { 754 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 755 } 756 } 757 } 758 } 759 THREAD->clr_do_not_unlock(); 760 761 // Notify jvmti 762 #ifdef VM_JVMTI 763 if (_jvmti_interp_events) { 764 // Whenever JVMTI puts a thread in interp_only_mode, method 765 // entry/exit events are sent for that thread to track stack depth. 766 if (THREAD->is_interp_only_mode()) { 767 CALL_VM(InterpreterRuntime::post_method_entry(THREAD), 768 handle_exception); 769 } 770 } 771 #endif /* VM_JVMTI */ 772 773 goto run; 774 } 775 776 case popping_frame: { 777 // returned from a java call to pop the frame, restart the call 778 // clear the message so we don't confuse ourselves later 779 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); 780 istate->set_msg(no_request); 781 if (_compiling) { 782 // Set MDX back to the ProfileData of the invoke bytecode that will be 783 // restarted. 784 SET_MDX(NULL); 785 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 786 } 787 THREAD->clr_pop_frame_in_process(); 788 goto run; 789 } 790 791 case method_resume: { 792 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 793 // resume 794 os::breakpoint(); 795 } 796 #ifdef HACK 797 { 798 ResourceMark rm; 799 char *method_name = istate->method()->name_and_sig_as_C_string(); 800 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 801 tty->print_cr("resume: depth %d bci: %d", 802 (istate->_stack_base - istate->_stack) , 803 istate->_bcp - istate->_method->code_base()); 804 interesting = true; 805 } 806 } 807 #endif // HACK 808 // returned from a java call, continue executing. 809 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { 810 goto handle_Pop_Frame; 811 } 812 if (THREAD->jvmti_thread_state() && 813 THREAD->jvmti_thread_state()->is_earlyret_pending()) { 814 goto handle_Early_Return; 815 } 816 817 if (THREAD->has_pending_exception()) goto handle_exception; 818 // Update the pc by the saved amount of the invoke bytecode size 819 UPDATE_PC(istate->bcp_advance()); 820 821 if (_compiling) { 822 // Get or create profile data. Check for pending (async) exceptions. 823 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 824 } 825 goto run; 826 } 827 828 case deopt_resume2: { 829 // Returned from an opcode that will reexecute. Deopt was 830 // a result of a PopFrame request. 831 // 832 833 if (_compiling) { 834 // Get or create profile data. Check for pending (async) exceptions. 835 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 836 } 837 goto run; 838 } 839 840 case deopt_resume: { 841 // Returned from an opcode that has completed. 
The stack has 842 // the result all we need to do is skip across the bytecode 843 // and continue (assuming there is no exception pending) 844 // 845 // compute continuation length 846 // 847 // Note: it is possible to deopt at a return_register_finalizer opcode 848 // because this requires entering the vm to do the registering. While the 849 // opcode is complete we can't advance because there are no more opcodes 850 // much like trying to deopt at a poll return. In that has we simply 851 // get out of here 852 // 853 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) { 854 // this will do the right thing even if an exception is pending. 855 goto handle_return; 856 } 857 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); 858 if (THREAD->has_pending_exception()) goto handle_exception; 859 860 if (_compiling) { 861 // Get or create profile data. Check for pending (async) exceptions. 862 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 863 } 864 goto run; 865 } 866 case got_monitors: { 867 // continue locking now that we have a monitor to use 868 // we expect to find newly allocated monitor at the "top" of the monitor stack. 869 oop lockee = STACK_OBJECT(-1); 870 VERIFY_OOP(lockee); 871 // derefing's lockee ought to provoke implicit null check 872 // find a free monitor 873 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); 874 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); 875 entry->set_obj(lockee); 876 bool success = false; 877 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 878 879 markOop mark = lockee->mark(); 880 intptr_t hash = (intptr_t) markOopDesc::no_hash; 881 // implies UseBiasedLocking 882 if (mark->has_bias_pattern()) { 883 uintptr_t thread_ident; 884 uintptr_t anticipated_bias_locking_value; 885 thread_ident = (uintptr_t)istate->thread(); 886 anticipated_bias_locking_value = 887 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 888 ~((uintptr_t) markOopDesc::age_mask_in_place); 889 890 if (anticipated_bias_locking_value == 0) { 891 // already biased towards this thread, nothing to do 892 if (PrintBiasedLockingStatistics) { 893 (* BiasedLocking::biased_lock_entry_count_addr())++; 894 } 895 success = true; 896 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 897 // try revoke bias 898 markOop header = lockee->klass()->prototype_header(); 899 if (hash != markOopDesc::no_hash) { 900 header = header->copy_set_hash(hash); 901 } 902 if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) { 903 if (PrintBiasedLockingStatistics) { 904 (*BiasedLocking::revoked_lock_entry_count_addr())++; 905 } 906 } 907 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 908 // try rebias 909 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 910 if (hash != markOopDesc::no_hash) { 911 new_header = new_header->copy_set_hash(hash); 912 } 913 if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) { 914 if (PrintBiasedLockingStatistics) { 915 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 916 } 917 } else { 918 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 919 } 920 success = true; 921 } else { 922 // try to bias towards thread in case object is anonymously biased 923 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 924 
(uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 925 if (hash != markOopDesc::no_hash) { 926 header = header->copy_set_hash(hash); 927 } 928 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 929 // debugging hint 930 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 931 if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) { 932 if (PrintBiasedLockingStatistics) { 933 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 934 } 935 } else { 936 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 937 } 938 success = true; 939 } 940 } 941 942 // traditional lightweight locking 943 if (!success) { 944 markOop displaced = lockee->mark()->set_unlocked(); 945 entry->lock()->set_displaced_header(displaced); 946 bool call_vm = UseHeavyMonitors; 947 if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) { 948 // Is it simple recursive case? 949 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 950 entry->lock()->set_displaced_header(NULL); 951 } else { 952 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 953 } 954 } 955 } 956 UPDATE_PC_AND_TOS(1, -1); 957 goto run; 958 } 959 default: { 960 fatal("Unexpected message from frame manager"); 961 } 962 } 963 964 run: 965 966 DO_UPDATE_INSTRUCTION_COUNT(*pc) 967 DEBUGGER_SINGLE_STEP_NOTIFY(); 968 #ifdef PREFETCH_OPCCODE 969 opcode = *pc; /* prefetch first opcode */ 970 #endif 971 972 #ifndef USELABELS 973 while (1) 974 #endif 975 { 976 #ifndef PREFETCH_OPCCODE 977 opcode = *pc; 978 #endif 979 // Seems like this happens twice per opcode. At worst this is only 980 // need at entry to the loop. 981 // DEBUGGER_SINGLE_STEP_NOTIFY(); 982 /* Using this labels avoids double breakpoints when quickening and 983 * when returing from transition frames. 984 */ 985 opcode_switch: 986 assert(istate == orig, "Corrupted istate"); 987 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */ 988 assert(topOfStack >= istate->stack_limit(), "Stack overrun"); 989 assert(topOfStack < istate->stack_base(), "Stack underrun"); 990 991 #ifdef USELABELS 992 DISPATCH(opcode); 993 #else 994 switch (opcode) 995 #endif 996 { 997 CASE(_nop): 998 UPDATE_PC_AND_CONTINUE(1); 999 1000 /* Push miscellaneous constants onto the stack. 
*/ 1001 1002 CASE(_aconst_null): 1003 SET_STACK_OBJECT(NULL, 0); 1004 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1005 1006 #undef OPC_CONST_n 1007 #define OPC_CONST_n(opcode, const_type, value) \ 1008 CASE(opcode): \ 1009 SET_STACK_ ## const_type(value, 0); \ 1010 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1011 1012 OPC_CONST_n(_iconst_m1, INT, -1); 1013 OPC_CONST_n(_iconst_0, INT, 0); 1014 OPC_CONST_n(_iconst_1, INT, 1); 1015 OPC_CONST_n(_iconst_2, INT, 2); 1016 OPC_CONST_n(_iconst_3, INT, 3); 1017 OPC_CONST_n(_iconst_4, INT, 4); 1018 OPC_CONST_n(_iconst_5, INT, 5); 1019 OPC_CONST_n(_fconst_0, FLOAT, 0.0); 1020 OPC_CONST_n(_fconst_1, FLOAT, 1.0); 1021 OPC_CONST_n(_fconst_2, FLOAT, 2.0); 1022 1023 #undef OPC_CONST2_n 1024 #define OPC_CONST2_n(opcname, value, key, kind) \ 1025 CASE(_##opcname): \ 1026 { \ 1027 SET_STACK_ ## kind(VM##key##Const##value(), 1); \ 1028 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1029 } 1030 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE); 1031 OPC_CONST2_n(dconst_1, One, double, DOUBLE); 1032 OPC_CONST2_n(lconst_0, Zero, long, LONG); 1033 OPC_CONST2_n(lconst_1, One, long, LONG); 1034 1035 /* Load constant from constant pool: */ 1036 1037 /* Push a 1-byte signed integer value onto the stack. */ 1038 CASE(_bipush): 1039 SET_STACK_INT((jbyte)(pc[1]), 0); 1040 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1041 1042 /* Push a 2-byte signed integer constant onto the stack. */ 1043 CASE(_sipush): 1044 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0); 1045 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 1046 1047 /* load from local variable */ 1048 1049 CASE(_aload): 1050 VERIFY_OOP(LOCALS_OBJECT(pc[1])); 1051 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0); 1052 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1053 1054 CASE(_iload): 1055 CASE(_fload): 1056 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0); 1057 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1058 1059 CASE(_lload): 1060 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1); 1061 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1062 1063 CASE(_dload): 1064 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1); 1065 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1066 1067 #undef OPC_LOAD_n 1068 #define OPC_LOAD_n(num) \ 1069 CASE(_aload_##num): \ 1070 VERIFY_OOP(LOCALS_OBJECT(num)); \ 1071 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \ 1072 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1073 \ 1074 CASE(_iload_##num): \ 1075 CASE(_fload_##num): \ 1076 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \ 1077 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1078 \ 1079 CASE(_lload_##num): \ 1080 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \ 1081 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1082 CASE(_dload_##num): \ 1083 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \ 1084 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1085 1086 OPC_LOAD_n(0); 1087 OPC_LOAD_n(1); 1088 OPC_LOAD_n(2); 1089 OPC_LOAD_n(3); 1090 1091 /* store to a local variable */ 1092 1093 CASE(_astore): 1094 astore(topOfStack, -1, locals, pc[1]); 1095 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1096 1097 CASE(_istore): 1098 CASE(_fstore): 1099 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]); 1100 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1101 1102 CASE(_lstore): 1103 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]); 1104 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1105 1106 CASE(_dstore): 1107 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]); 1108 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1109 1110 CASE(_wide): { 1111 uint16_t reg = Bytes::get_Java_u2(pc + 2); 1112 1113 opcode = pc[1]; 1114 1115 // Wide and it's sub-bytecode are counted as separate instructions. 
If we 1116 // don't account for this here, the bytecode trace skips the next bytecode. 1117 DO_UPDATE_INSTRUCTION_COUNT(opcode); 1118 1119 switch(opcode) { 1120 case Bytecodes::_aload: 1121 VERIFY_OOP(LOCALS_OBJECT(reg)); 1122 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); 1123 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1124 1125 case Bytecodes::_iload: 1126 case Bytecodes::_fload: 1127 SET_STACK_SLOT(LOCALS_SLOT(reg), 0); 1128 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1129 1130 case Bytecodes::_lload: 1131 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1132 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1133 1134 case Bytecodes::_dload: 1135 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1136 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1137 1138 case Bytecodes::_astore: 1139 astore(topOfStack, -1, locals, reg); 1140 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1141 1142 case Bytecodes::_istore: 1143 case Bytecodes::_fstore: 1144 SET_LOCALS_SLOT(STACK_SLOT(-1), reg); 1145 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1146 1147 case Bytecodes::_lstore: 1148 SET_LOCALS_LONG(STACK_LONG(-1), reg); 1149 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1150 1151 case Bytecodes::_dstore: 1152 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg); 1153 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1154 1155 case Bytecodes::_iinc: { 1156 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); 1157 // Be nice to see what this generates.... QQQ 1158 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); 1159 UPDATE_PC_AND_CONTINUE(6); 1160 } 1161 case Bytecodes::_ret: 1162 // Profile ret. 1163 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg)))); 1164 // Now, update the pc. 1165 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); 1166 UPDATE_PC_AND_CONTINUE(0); 1167 default: 1168 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap); 1169 } 1170 } 1171 1172 1173 #undef OPC_STORE_n 1174 #define OPC_STORE_n(num) \ 1175 CASE(_astore_##num): \ 1176 astore(topOfStack, -1, locals, num); \ 1177 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1178 CASE(_istore_##num): \ 1179 CASE(_fstore_##num): \ 1180 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \ 1181 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1182 1183 OPC_STORE_n(0); 1184 OPC_STORE_n(1); 1185 OPC_STORE_n(2); 1186 OPC_STORE_n(3); 1187 1188 #undef OPC_DSTORE_n 1189 #define OPC_DSTORE_n(num) \ 1190 CASE(_dstore_##num): \ 1191 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \ 1192 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1193 CASE(_lstore_##num): \ 1194 SET_LOCALS_LONG(STACK_LONG(-1), num); \ 1195 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1196 1197 OPC_DSTORE_n(0); 1198 OPC_DSTORE_n(1); 1199 OPC_DSTORE_n(2); 1200 OPC_DSTORE_n(3); 1201 1202 /* stack pop, dup, and insert opcodes */ 1203 1204 1205 CASE(_pop): /* Discard the top item on the stack */ 1206 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1207 1208 1209 CASE(_pop2): /* Discard the top 2 items on the stack */ 1210 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1211 1212 1213 CASE(_dup): /* Duplicate the top item on the stack */ 1214 dup(topOfStack); 1215 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1216 1217 CASE(_dup2): /* Duplicate the top 2 items on the stack */ 1218 dup2(topOfStack); 1219 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1220 1221 CASE(_dup_x1): /* insert top word two down */ 1222 dup_x1(topOfStack); 1223 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1224 1225 CASE(_dup_x2): /* insert top word three down */ 1226 dup_x2(topOfStack); 1227 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1228 1229 CASE(_dup2_x1): /* insert top 2 slots three down */ 1230 dup2_x1(topOfStack); 1231 
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1232 1233 CASE(_dup2_x2): /* insert top 2 slots four down */ 1234 dup2_x2(topOfStack); 1235 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1236 1237 CASE(_swap): { /* swap top two elements on the stack */ 1238 swap(topOfStack); 1239 UPDATE_PC_AND_CONTINUE(1); 1240 } 1241 1242 /* Perform various binary integer operations */ 1243 1244 #undef OPC_INT_BINARY 1245 #define OPC_INT_BINARY(opcname, opname, test) \ 1246 CASE(_i##opcname): \ 1247 if (test && (STACK_INT(-1) == 0)) { \ 1248 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1249 "/ by zero", note_div0Check_trap); \ 1250 } \ 1251 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1252 STACK_INT(-1)), \ 1253 -2); \ 1254 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1255 CASE(_l##opcname): \ 1256 { \ 1257 if (test) { \ 1258 jlong l1 = STACK_LONG(-1); \ 1259 if (VMlongEqz(l1)) { \ 1260 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1261 "/ by long zero", note_div0Check_trap); \ 1262 } \ 1263 } \ 1264 /* First long at (-1,-2) next long at (-3,-4) */ \ 1265 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ 1266 STACK_LONG(-1)), \ 1267 -3); \ 1268 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1269 } 1270 1271 OPC_INT_BINARY(add, Add, 0); 1272 OPC_INT_BINARY(sub, Sub, 0); 1273 OPC_INT_BINARY(mul, Mul, 0); 1274 OPC_INT_BINARY(and, And, 0); 1275 OPC_INT_BINARY(or, Or, 0); 1276 OPC_INT_BINARY(xor, Xor, 0); 1277 OPC_INT_BINARY(div, Div, 1); 1278 OPC_INT_BINARY(rem, Rem, 1); 1279 1280 1281 /* Perform various binary floating number operations */ 1282 /* On some machine/platforms/compilers div zero check can be implicit */ 1283 1284 #undef OPC_FLOAT_BINARY 1285 #define OPC_FLOAT_BINARY(opcname, opname) \ 1286 CASE(_d##opcname): { \ 1287 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \ 1288 STACK_DOUBLE(-1)), \ 1289 -3); \ 1290 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1291 } \ 1292 CASE(_f##opcname): \ 1293 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \ 1294 STACK_FLOAT(-1)), \ 1295 -2); \ 1296 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1297 1298 1299 OPC_FLOAT_BINARY(add, Add); 1300 OPC_FLOAT_BINARY(sub, Sub); 1301 OPC_FLOAT_BINARY(mul, Mul); 1302 OPC_FLOAT_BINARY(div, Div); 1303 OPC_FLOAT_BINARY(rem, Rem); 1304 1305 /* Shift operations 1306 * Shift left int and long: ishl, lshl 1307 * Logical shift right int and long w/zero extension: iushr, lushr 1308 * Arithmetic shift right int and long w/sign extension: ishr, lshr 1309 */ 1310 1311 #undef OPC_SHIFT_BINARY 1312 #define OPC_SHIFT_BINARY(opcname, opname) \ 1313 CASE(_i##opcname): \ 1314 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1315 STACK_INT(-1)), \ 1316 -2); \ 1317 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1318 CASE(_l##opcname): \ 1319 { \ 1320 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \ 1321 STACK_INT(-1)), \ 1322 -2); \ 1323 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1324 } 1325 1326 OPC_SHIFT_BINARY(shl, Shl); 1327 OPC_SHIFT_BINARY(shr, Shr); 1328 OPC_SHIFT_BINARY(ushr, Ushr); 1329 1330 /* Increment local variable by constant */ 1331 CASE(_iinc): 1332 { 1333 // locals[pc[1]].j.i += (jbyte)(pc[2]); 1334 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]); 1335 UPDATE_PC_AND_CONTINUE(3); 1336 } 1337 1338 /* negate the value on the top of the stack */ 1339 1340 CASE(_ineg): 1341 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); 1342 UPDATE_PC_AND_CONTINUE(1); 1343 1344 CASE(_fneg): 1345 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); 1346 UPDATE_PC_AND_CONTINUE(1); 1347 1348 CASE(_lneg): 1349 { 1350 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); 1351 
UPDATE_PC_AND_CONTINUE(1); 1352 } 1353 1354 CASE(_dneg): 1355 { 1356 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); 1357 UPDATE_PC_AND_CONTINUE(1); 1358 } 1359 1360 /* Conversion operations */ 1361 1362 CASE(_i2f): /* convert top of stack int to float */ 1363 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1); 1364 UPDATE_PC_AND_CONTINUE(1); 1365 1366 CASE(_i2l): /* convert top of stack int to long */ 1367 { 1368 // this is ugly QQQ 1369 jlong r = VMint2Long(STACK_INT(-1)); 1370 MORE_STACK(-1); // Pop 1371 SET_STACK_LONG(r, 1); 1372 1373 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1374 } 1375 1376 CASE(_i2d): /* convert top of stack int to double */ 1377 { 1378 // this is ugly QQQ (why cast to jlong?? ) 1379 jdouble r = (jlong)STACK_INT(-1); 1380 MORE_STACK(-1); // Pop 1381 SET_STACK_DOUBLE(r, 1); 1382 1383 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1384 } 1385 1386 CASE(_l2i): /* convert top of stack long to int */ 1387 { 1388 jint r = VMlong2Int(STACK_LONG(-1)); 1389 MORE_STACK(-2); // Pop 1390 SET_STACK_INT(r, 0); 1391 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1392 } 1393 1394 CASE(_l2f): /* convert top of stack long to float */ 1395 { 1396 jlong r = STACK_LONG(-1); 1397 MORE_STACK(-2); // Pop 1398 SET_STACK_FLOAT(VMlong2Float(r), 0); 1399 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1400 } 1401 1402 CASE(_l2d): /* convert top of stack long to double */ 1403 { 1404 jlong r = STACK_LONG(-1); 1405 MORE_STACK(-2); // Pop 1406 SET_STACK_DOUBLE(VMlong2Double(r), 1); 1407 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1408 } 1409 1410 CASE(_f2i): /* Convert top of stack float to int */ 1411 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); 1412 UPDATE_PC_AND_CONTINUE(1); 1413 1414 CASE(_f2l): /* convert top of stack float to long */ 1415 { 1416 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1)); 1417 MORE_STACK(-1); // POP 1418 SET_STACK_LONG(r, 1); 1419 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1420 } 1421 1422 CASE(_f2d): /* convert top of stack float to double */ 1423 { 1424 jfloat f; 1425 jdouble r; 1426 f = STACK_FLOAT(-1); 1427 r = (jdouble) f; 1428 MORE_STACK(-1); // POP 1429 SET_STACK_DOUBLE(r, 1); 1430 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1431 } 1432 1433 CASE(_d2i): /* convert top of stack double to int */ 1434 { 1435 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1)); 1436 MORE_STACK(-2); 1437 SET_STACK_INT(r1, 0); 1438 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1439 } 1440 1441 CASE(_d2f): /* convert top of stack double to float */ 1442 { 1443 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1)); 1444 MORE_STACK(-2); 1445 SET_STACK_FLOAT(r1, 0); 1446 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1447 } 1448 1449 CASE(_d2l): /* convert top of stack double to long */ 1450 { 1451 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1)); 1452 MORE_STACK(-2); 1453 SET_STACK_LONG(r1, 1); 1454 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1455 } 1456 1457 CASE(_i2b): 1458 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1); 1459 UPDATE_PC_AND_CONTINUE(1); 1460 1461 CASE(_i2c): 1462 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1); 1463 UPDATE_PC_AND_CONTINUE(1); 1464 1465 CASE(_i2s): 1466 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1); 1467 UPDATE_PC_AND_CONTINUE(1); 1468 1469 /* comparison operators */ 1470 1471 1472 #define COMPARISON_OP(name, comparison) \ 1473 CASE(_if_icmp##name): { \ 1474 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \ 1475 int skip = cmp \ 1476 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1477 address branch_pc = pc; \ 1478 /* Profile branch. 
*/ \ 1479 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1480 UPDATE_PC_AND_TOS(skip, -2); \ 1481 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1482 CONTINUE; \ 1483 } \ 1484 CASE(_if##name): { \ 1485 const bool cmp = (STACK_INT(-1) comparison 0); \ 1486 int skip = cmp \ 1487 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1488 address branch_pc = pc; \ 1489 /* Profile branch. */ \ 1490 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1491 UPDATE_PC_AND_TOS(skip, -1); \ 1492 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1493 CONTINUE; \ 1494 } 1495 1496 #define COMPARISON_OP2(name, comparison) \ 1497 COMPARISON_OP(name, comparison) \ 1498 CASE(_if_acmp##name): { \ 1499 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \ 1500 int skip = cmp \ 1501 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1502 address branch_pc = pc; \ 1503 /* Profile branch. */ \ 1504 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1505 UPDATE_PC_AND_TOS(skip, -2); \ 1506 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1507 CONTINUE; \ 1508 } 1509 1510 #define NULL_COMPARISON_NOT_OP(name) \ 1511 CASE(_if##name): { \ 1512 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \ 1513 int skip = cmp \ 1514 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1515 address branch_pc = pc; \ 1516 /* Profile branch. */ \ 1517 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1518 UPDATE_PC_AND_TOS(skip, -1); \ 1519 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1520 CONTINUE; \ 1521 } 1522 1523 #define NULL_COMPARISON_OP(name) \ 1524 CASE(_if##name): { \ 1525 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \ 1526 int skip = cmp \ 1527 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1528 address branch_pc = pc; \ 1529 /* Profile branch. */ \ 1530 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1531 UPDATE_PC_AND_TOS(skip, -1); \ 1532 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1533 CONTINUE; \ 1534 } 1535 COMPARISON_OP(lt, <); 1536 COMPARISON_OP(gt, >); 1537 COMPARISON_OP(le, <=); 1538 COMPARISON_OP(ge, >=); 1539 COMPARISON_OP2(eq, ==); /* include ref comparison */ 1540 COMPARISON_OP2(ne, !=); /* include ref comparison */ 1541 NULL_COMPARISON_OP(null); 1542 NULL_COMPARISON_NOT_OP(nonnull); 1543 1544 /* Goto pc at specified offset in switch table. */ 1545 1546 CASE(_tableswitch): { 1547 jint* lpc = (jint*)VMalignWordUp(pc+1); 1548 int32_t key = STACK_INT(-1); 1549 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); 1550 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); 1551 int32_t skip; 1552 key -= low; 1553 if (((uint32_t) key > (uint32_t)(high - low))) { 1554 key = -1; 1555 skip = Bytes::get_Java_u4((address)&lpc[0]); 1556 } else { 1557 skip = Bytes::get_Java_u4((address)&lpc[key + 3]); 1558 } 1559 // Profile switch. 1560 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key); 1561 // Does this really need a full backedge check (osr)? 1562 address branch_pc = pc; 1563 UPDATE_PC_AND_TOS(skip, -1); 1564 DO_BACKEDGE_CHECKS(skip, branch_pc); 1565 CONTINUE; 1566 } 1567 1568 /* Goto pc whose table entry matches specified key. */ 1569 1570 CASE(_lookupswitch): { 1571 jint* lpc = (jint*)VMalignWordUp(pc+1); 1572 int32_t key = STACK_INT(-1); 1573 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ 1574 // Remember index. 1575 int index = -1; 1576 int newindex = 0; 1577 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); 1578 while (--npairs >= 0) { 1579 lpc += 2; 1580 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 1581 skip = Bytes::get_Java_u4((address)&lpc[1]); 1582 index = newindex; 1583 break; 1584 } 1585 newindex += 1; 1586 } 1587 // Profile switch. 
1588 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index); 1589 address branch_pc = pc; 1590 UPDATE_PC_AND_TOS(skip, -1); 1591 DO_BACKEDGE_CHECKS(skip, branch_pc); 1592 CONTINUE; 1593 } 1594 1595 CASE(_fcmpl): 1596 CASE(_fcmpg): 1597 { 1598 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), 1599 STACK_FLOAT(-1), 1600 (opcode == Bytecodes::_fcmpl ? -1 : 1)), 1601 -2); 1602 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1603 } 1604 1605 CASE(_dcmpl): 1606 CASE(_dcmpg): 1607 { 1608 int r = VMdoubleCompare(STACK_DOUBLE(-3), 1609 STACK_DOUBLE(-1), 1610 (opcode == Bytecodes::_dcmpl ? -1 : 1)); 1611 MORE_STACK(-4); // Pop 1612 SET_STACK_INT(r, 0); 1613 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1614 } 1615 1616 CASE(_lcmp): 1617 { 1618 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1)); 1619 MORE_STACK(-4); 1620 SET_STACK_INT(r, 0); 1621 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1622 } 1623 1624 1625 /* Return from a method */ 1626 1627 CASE(_areturn): 1628 CASE(_ireturn): 1629 CASE(_freturn): 1630 { 1631 // Allow a safepoint before returning to frame manager. 1632 SAFEPOINT; 1633 1634 goto handle_return; 1635 } 1636 1637 CASE(_lreturn): 1638 CASE(_dreturn): 1639 { 1640 // Allow a safepoint before returning to frame manager. 1641 SAFEPOINT; 1642 goto handle_return; 1643 } 1644 1645 CASE(_return_register_finalizer): { 1646 1647 oop rcvr = LOCALS_OBJECT(0); 1648 VERIFY_OOP(rcvr); 1649 if (rcvr->klass()->has_finalizer()) { 1650 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception); 1651 } 1652 goto handle_return; 1653 } 1654 CASE(_return): { 1655 1656 // Allow a safepoint before returning to frame manager. 1657 SAFEPOINT; 1658 goto handle_return; 1659 } 1660 1661 /* Array access byte-codes */ 1662 1663 /* Every array access byte-code starts out like this */ 1664 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); 1665 #define ARRAY_INTRO(arrayOff) \ 1666 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \ 1667 jint index = STACK_INT(arrayOff + 1); \ 1668 char message[jintAsStringSize]; \ 1669 CHECK_NULL(arrObj); \ 1670 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ 1671 sprintf(message, "%d", index); \ 1672 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ 1673 message, note_rangeCheck_trap); \ 1674 } 1675 1676 /* 32-bit loads. 
These handle conversion from < 32-bit types */ 1677 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1678 { \ 1679 ARRAY_INTRO(-2); \ 1680 (void)extra; \ 1681 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1682 -2); \ 1683 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1684 } 1685 1686 /* 64-bit loads */ 1687 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1688 { \ 1689 ARRAY_INTRO(-2); \ 1690 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1691 (void)extra; \ 1692 UPDATE_PC_AND_CONTINUE(1); \ 1693 } 1694 1695 CASE(_iaload): 1696 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1697 CASE(_faload): 1698 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1699 CASE(_aaload): { 1700 ARRAY_INTRO(-2); 1701 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1702 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1703 } 1704 CASE(_baload): 1705 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1706 CASE(_caload): 1707 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1708 CASE(_saload): 1709 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1710 CASE(_laload): 1711 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1712 CASE(_daload): 1713 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1714 1715 /* 32-bit stores. These handle conversion to < 32-bit types */ 1716 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1717 { \ 1718 ARRAY_INTRO(-3); \ 1719 (void)extra; \ 1720 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1721 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1722 } 1723 1724 /* 64-bit stores */ 1725 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1726 { \ 1727 ARRAY_INTRO(-4); \ 1728 (void)extra; \ 1729 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1730 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1731 } 1732 1733 CASE(_iastore): 1734 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1735 CASE(_fastore): 1736 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1737 /* 1738 * This one looks different because of the assignability check 1739 */ 1740 CASE(_aastore): { 1741 oop rhsObject = STACK_OBJECT(-1); 1742 VERIFY_OOP(rhsObject); 1743 ARRAY_INTRO( -3); 1744 // arrObj, index are set 1745 if (rhsObject != NULL) { 1746 /* Check assignability of rhsObject into arrObj */ 1747 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1748 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1749 // 1750 // Check for compatibilty. This check must not GC!! 1751 // Seems way more expensive now that we must dispatch 1752 // 1753 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1754 // Decrement counter if subtype check failed. 1755 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1756 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1757 } 1758 // Profile checkcast with null_seen and receiver. 1759 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1760 } else { 1761 // Profile checkcast with null_seen and receiver. 
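// Storing null is always legal regardless of the array's element type (null
// is assignable to every reference type), which is why this branch performs
// no subtype check and only records null_seen for the profile.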
1762 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1763 } 1764 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1765 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1766 } 1767 CASE(_bastore): { 1768 ARRAY_INTRO(-3); 1769 int item = STACK_INT(-1); 1770 // if it is a T_BOOLEAN array, mask the stored value to 0/1 1771 if (arrObj->klass() == Universe::boolArrayKlassObj()) { 1772 item &= 1; 1773 } else { 1774 assert(arrObj->klass() == Universe::byteArrayKlassObj(), 1775 "should be byte array otherwise"); 1776 } 1777 ((typeArrayOop)arrObj)->byte_at_put(index, item); 1778 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1779 } 1780 CASE(_castore): 1781 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1782 CASE(_sastore): 1783 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1784 CASE(_lastore): 1785 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1786 CASE(_dastore): 1787 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1788 1789 CASE(_arraylength): 1790 { 1791 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1792 CHECK_NULL(ary); 1793 SET_STACK_INT(ary->length(), -1); 1794 UPDATE_PC_AND_CONTINUE(1); 1795 } 1796 1797 /* monitorenter and monitorexit for locking/unlocking an object */ 1798 1799 CASE(_monitorenter): { 1800 oop lockee = STACK_OBJECT(-1); 1801 // derefing's lockee ought to provoke implicit null check 1802 CHECK_NULL(lockee); 1803 // find a free monitor or one already allocated for this object 1804 // if we find a matching object then we need a new monitor 1805 // since this is recursive enter 1806 BasicObjectLock* limit = istate->monitor_base(); 1807 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1808 BasicObjectLock* entry = NULL; 1809 while (most_recent != limit ) { 1810 if (most_recent->obj() == NULL) entry = most_recent; 1811 else if (most_recent->obj() == lockee) break; 1812 most_recent++; 1813 } 1814 if (entry != NULL) { 1815 entry->set_obj(lockee); 1816 int success = false; 1817 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1818 1819 markOop mark = lockee->mark(); 1820 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1821 // implies UseBiasedLocking 1822 if (mark->has_bias_pattern()) { 1823 uintptr_t thread_ident; 1824 uintptr_t anticipated_bias_locking_value; 1825 thread_ident = (uintptr_t)istate->thread(); 1826 anticipated_bias_locking_value = 1827 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1828 ~((uintptr_t) markOopDesc::age_mask_in_place); 1829 1830 if (anticipated_bias_locking_value == 0) { 1831 // already biased towards this thread, nothing to do 1832 if (PrintBiasedLockingStatistics) { 1833 (* BiasedLocking::biased_lock_entry_count_addr())++; 1834 } 1835 success = true; 1836 } 1837 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1838 // try revoke bias 1839 markOop header = lockee->klass()->prototype_header(); 1840 if (hash != markOopDesc::no_hash) { 1841 header = header->copy_set_hash(hash); 1842 } 1843 if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) { 1844 if (PrintBiasedLockingStatistics) 1845 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1846 } 1847 } 1848 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1849 // try rebias 1850 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1851 if (hash != markOopDesc::no_hash) { 1852 new_header = new_header->copy_set_hash(hash); 1853 } 1854 if (Atomic::cmpxchg(new_header, 
lockee->mark_addr(), mark) == mark) { 1855 if (PrintBiasedLockingStatistics) 1856 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1857 } 1858 else { 1859 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1860 } 1861 success = true; 1862 } 1863 else { 1864 // try to bias towards thread in case object is anonymously biased 1865 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1866 (uintptr_t)markOopDesc::age_mask_in_place | 1867 epoch_mask_in_place)); 1868 if (hash != markOopDesc::no_hash) { 1869 header = header->copy_set_hash(hash); 1870 } 1871 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1872 // debugging hint 1873 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1874 if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) { 1875 if (PrintBiasedLockingStatistics) 1876 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1877 } 1878 else { 1879 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1880 } 1881 success = true; 1882 } 1883 } 1884 1885 // traditional lightweight locking 1886 if (!success) { 1887 markOop displaced = lockee->mark()->set_unlocked(); 1888 entry->lock()->set_displaced_header(displaced); 1889 bool call_vm = UseHeavyMonitors; 1890 if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) { 1891 // Is it simple recursive case? 1892 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1893 entry->lock()->set_displaced_header(NULL); 1894 } else { 1895 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1896 } 1897 } 1898 } 1899 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1900 } else { 1901 istate->set_msg(more_monitors); 1902 UPDATE_PC_AND_RETURN(0); // Re-execute 1903 } 1904 } 1905 1906 CASE(_monitorexit): { 1907 oop lockee = STACK_OBJECT(-1); 1908 CHECK_NULL(lockee); 1909 // derefing's lockee ought to provoke implicit null check 1910 // find our monitor slot 1911 BasicObjectLock* limit = istate->monitor_base(); 1912 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1913 while (most_recent != limit ) { 1914 if ((most_recent)->obj() == lockee) { 1915 BasicLock* lock = most_recent->lock(); 1916 markOop header = lock->displaced_header(); 1917 most_recent->set_obj(NULL); 1918 if (!lockee->mark()->has_bias_pattern()) { 1919 bool call_vm = UseHeavyMonitors; 1920 // If it isn't recursive we either must swap old header or call the runtime 1921 if (header != NULL || call_vm) { 1922 markOop old_header = markOopDesc::encode(lock); 1923 if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) { 1924 // restore object for the slow case 1925 most_recent->set_obj(lockee); 1926 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1927 } 1928 } 1929 } 1930 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1931 } 1932 most_recent++; 1933 } 1934 // Need to throw illegal monitor state exception 1935 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1936 ShouldNotReachHere(); 1937 } 1938 1939 /* All of the non-quick opcodes. */ 1940 1941 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1942 * constant pool index in the instruction. 
1943 */ 1944 CASE(_getfield): 1945 CASE(_getstatic): 1946 { 1947 u2 index; 1948 ConstantPoolCacheEntry* cache; 1949 index = Bytes::get_native_u2(pc+1); 1950 1951 // QQQ Need to make this as inlined as possible. Probably need to 1952 // split all the bytecode cases out so c++ compiler has a chance 1953 // for constant prop to fold everything possible away. 1954 1955 cache = cp->entry_at(index); 1956 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1957 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1958 handle_exception); 1959 cache = cp->entry_at(index); 1960 } 1961 1962 #ifdef VM_JVMTI 1963 if (_jvmti_interp_events) { 1964 int *count_addr; 1965 oop obj; 1966 // Check to see if a field modification watch has been set 1967 // before we take the time to call into the VM. 1968 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1969 if ( *count_addr > 0 ) { 1970 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1971 obj = (oop)NULL; 1972 } else { 1973 obj = (oop) STACK_OBJECT(-1); 1974 VERIFY_OOP(obj); 1975 } 1976 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1977 obj, 1978 cache), 1979 handle_exception); 1980 } 1981 } 1982 #endif /* VM_JVMTI */ 1983 1984 oop obj; 1985 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1986 Klass* k = cache->f1_as_klass(); 1987 obj = k->java_mirror(); 1988 MORE_STACK(1); // Assume single slot push 1989 } else { 1990 obj = (oop) STACK_OBJECT(-1); 1991 CHECK_NULL(obj); 1992 } 1993 1994 // 1995 // Now store the result on the stack 1996 // 1997 TosState tos_type = cache->flag_state(); 1998 int field_offset = cache->f2_as_index(); 1999 if (cache->is_volatile()) { 2000 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2001 OrderAccess::fence(); 2002 } 2003 if (tos_type == atos) { 2004 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 2005 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 2006 } else if (tos_type == itos) { 2007 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 2008 } else if (tos_type == ltos) { 2009 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 2010 MORE_STACK(1); 2011 } else if (tos_type == btos || tos_type == ztos) { 2012 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 2013 } else if (tos_type == ctos) { 2014 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2015 } else if (tos_type == stos) { 2016 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2017 } else if (tos_type == ftos) { 2018 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2019 } else { 2020 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2021 MORE_STACK(1); 2022 } 2023 } else { 2024 if (tos_type == atos) { 2025 VERIFY_OOP(obj->obj_field(field_offset)); 2026 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2027 } else if (tos_type == itos) { 2028 SET_STACK_INT(obj->int_field(field_offset), -1); 2029 } else if (tos_type == ltos) { 2030 SET_STACK_LONG(obj->long_field(field_offset), 0); 2031 MORE_STACK(1); 2032 } else if (tos_type == btos || tos_type == ztos) { 2033 SET_STACK_INT(obj->byte_field(field_offset), -1); 2034 } else if (tos_type == ctos) { 2035 SET_STACK_INT(obj->char_field(field_offset), -1); 2036 } else if (tos_type == stos) { 2037 SET_STACK_INT(obj->short_field(field_offset), -1); 2038 } else if (tos_type == ftos) { 2039 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2040 } else { 2041 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2042 MORE_STACK(1); 2043 } 2044 } 2045 2046 UPDATE_PC_AND_CONTINUE(3); 2047 
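// Net stack effect of the code above, as a rough summary: getfield replaces
// the receiver slot with the loaded value (one slot for itos/btos/ztos/ctos/
// stos/ftos/atos, one extra slot grown for ltos/dtos); getstatic starts from
// the holder's java_mirror() and simply pushes one or two slots. Either way
// the pc advances by 3: opcode plus a 2-byte constant pool cache index.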
} 2048 2049 CASE(_putfield): 2050 CASE(_putstatic): 2051 { 2052 u2 index = Bytes::get_native_u2(pc+1); 2053 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2054 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2055 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2056 handle_exception); 2057 cache = cp->entry_at(index); 2058 } 2059 2060 #ifdef VM_JVMTI 2061 if (_jvmti_interp_events) { 2062 int *count_addr; 2063 oop obj; 2064 // Check to see if a field modification watch has been set 2065 // before we take the time to call into the VM. 2066 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2067 if ( *count_addr > 0 ) { 2068 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2069 obj = (oop)NULL; 2070 } 2071 else { 2072 if (cache->is_long() || cache->is_double()) { 2073 obj = (oop) STACK_OBJECT(-3); 2074 } else { 2075 obj = (oop) STACK_OBJECT(-2); 2076 } 2077 VERIFY_OOP(obj); 2078 } 2079 2080 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2081 obj, 2082 cache, 2083 (jvalue *)STACK_SLOT(-1)), 2084 handle_exception); 2085 } 2086 } 2087 #endif /* VM_JVMTI */ 2088 2089 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2090 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2091 2092 oop obj; 2093 int count; 2094 TosState tos_type = cache->flag_state(); 2095 2096 count = -1; 2097 if (tos_type == ltos || tos_type == dtos) { 2098 --count; 2099 } 2100 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2101 Klass* k = cache->f1_as_klass(); 2102 obj = k->java_mirror(); 2103 } else { 2104 --count; 2105 obj = (oop) STACK_OBJECT(count); 2106 CHECK_NULL(obj); 2107 } 2108 2109 // 2110 // Now store the result 2111 // 2112 int field_offset = cache->f2_as_index(); 2113 if (cache->is_volatile()) { 2114 if (tos_type == itos) { 2115 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2116 } else if (tos_type == atos) { 2117 VERIFY_OOP(STACK_OBJECT(-1)); 2118 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2119 } else if (tos_type == btos) { 2120 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2121 } else if (tos_type == ztos) { 2122 int bool_field = STACK_INT(-1); // only store LSB 2123 obj->release_byte_field_put(field_offset, (bool_field & 1)); 2124 } else if (tos_type == ltos) { 2125 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2126 } else if (tos_type == ctos) { 2127 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2128 } else if (tos_type == stos) { 2129 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2130 } else if (tos_type == ftos) { 2131 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2132 } else { 2133 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2134 } 2135 OrderAccess::storeload(); 2136 } else { 2137 if (tos_type == itos) { 2138 obj->int_field_put(field_offset, STACK_INT(-1)); 2139 } else if (tos_type == atos) { 2140 VERIFY_OOP(STACK_OBJECT(-1)); 2141 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2142 } else if (tos_type == btos) { 2143 obj->byte_field_put(field_offset, STACK_INT(-1)); 2144 } else if (tos_type == ztos) { 2145 int bool_field = STACK_INT(-1); // only store LSB 2146 obj->byte_field_put(field_offset, (bool_field & 1)); 2147 } else if (tos_type == ltos) { 2148 obj->long_field_put(field_offset, STACK_LONG(-1)); 2149 } else if (tos_type == ctos) { 2150 obj->char_field_put(field_offset, STACK_INT(-1)); 2151 } else if (tos_type == 
stos) { 2152 obj->short_field_put(field_offset, STACK_INT(-1)); 2153 } else if (tos_type == ftos) { 2154 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2155 } else { 2156 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2157 } 2158 } 2159 2160 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2161 } 2162 2163 CASE(_new): { 2164 u2 index = Bytes::get_Java_u2(pc+1); 2165 ConstantPool* constants = istate->method()->constants(); 2166 if (!constants->tag_at(index).is_unresolved_klass()) { 2167 // Make sure klass is initialized and doesn't have a finalizer 2168 Klass* entry = constants->resolved_klass_at(index); 2169 InstanceKlass* ik = InstanceKlass::cast(entry); 2170 if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2171 size_t obj_size = ik->size_helper(); 2172 oop result = NULL; 2173 // If the TLAB isn't pre-zeroed then we'll have to do it 2174 bool need_zero = !ZeroTLAB; 2175 if (UseTLAB) { 2176 result = (oop) THREAD->tlab().allocate(obj_size); 2177 } 2178 // Disable non-TLAB-based fast-path, because profiling requires that all 2179 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2180 // returns NULL. 2181 #ifndef CC_INTERP_PROFILE 2182 if (result == NULL) { 2183 need_zero = true; 2184 // Try allocate in shared eden 2185 retry: 2186 HeapWord* compare_to = *Universe::heap()->top_addr(); 2187 HeapWord* new_top = compare_to + obj_size; 2188 if (new_top <= *Universe::heap()->end_addr()) { 2189 if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2190 goto retry; 2191 } 2192 result = (oop) compare_to; 2193 } 2194 } 2195 #endif 2196 if (result != NULL) { 2197 // Initialize object (if nonzero size and need) and then the header 2198 if (need_zero ) { 2199 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2200 obj_size -= sizeof(oopDesc) / oopSize; 2201 if (obj_size > 0 ) { 2202 memset(to_zero, 0, obj_size * HeapWordSize); 2203 } 2204 } 2205 if (UseBiasedLocking) { 2206 result->set_mark(ik->prototype_header()); 2207 } else { 2208 result->set_mark(markOopDesc::prototype()); 2209 } 2210 result->set_klass_gap(0); 2211 result->set_klass(ik); 2212 // Must prevent reordering of stores for object initialization 2213 // with stores that publish the new object. 2214 OrderAccess::storestore(); 2215 SET_STACK_OBJECT(result, 0); 2216 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2217 } 2218 } 2219 } 2220 // Slow case allocation 2221 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2222 handle_exception); 2223 // Must prevent reordering of stores for object initialization 2224 // with stores that publish the new object. 2225 OrderAccess::storestore(); 2226 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2227 THREAD->set_vm_result(NULL); 2228 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2229 } 2230 CASE(_anewarray): { 2231 u2 index = Bytes::get_Java_u2(pc+1); 2232 jint size = STACK_INT(-1); 2233 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2234 handle_exception); 2235 // Must prevent reordering of stores for object initialization 2236 // with stores that publish the new object. 2237 OrderAccess::storestore(); 2238 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2239 THREAD->set_vm_result(NULL); 2240 UPDATE_PC_AND_CONTINUE(3); 2241 } 2242 CASE(_multianewarray): { 2243 jint dims = *(pc+3); 2244 jint size = STACK_INT(-1); 2245 // stack grows down, dimensions are up! 
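// Rough sketch of the stack discipline here: the 'dims' dimension sizes are
// the top 'dims' expression stack slots; dimarray hands the runtime a pointer
// into that region, and afterwards the sizes are popped and the new array
// reference is pushed in their place. For example, a two-dimensional
// allocation pops two sizes and pushes one reference, a net TOS change of -1,
// matching the -(dims-1) below; the pc advance of 4 covers the opcode, the
// 2-byte constant pool index and the 1-byte dimension count.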
2246 jint *dimarray = 2247 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2248 Interpreter::stackElementWords-1]; 2249 //adjust pointer to start of stack element 2250 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2251 handle_exception); 2252 // Must prevent reordering of stores for object initialization 2253 // with stores that publish the new object. 2254 OrderAccess::storestore(); 2255 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2256 THREAD->set_vm_result(NULL); 2257 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2258 } 2259 CASE(_checkcast): 2260 if (STACK_OBJECT(-1) != NULL) { 2261 VERIFY_OOP(STACK_OBJECT(-1)); 2262 u2 index = Bytes::get_Java_u2(pc+1); 2263 // Constant pool may have actual klass or unresolved klass. If it is 2264 // unresolved we must resolve it. 2265 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2266 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2267 } 2268 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2269 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2270 // 2271 // Check for compatibilty. This check must not GC!! 2272 // Seems way more expensive now that we must dispatch. 2273 // 2274 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2275 // Decrement counter at checkcast. 2276 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2277 ResourceMark rm(THREAD); 2278 char* message = SharedRuntime::generate_class_cast_message( 2279 objKlass, klassOf); 2280 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2281 } 2282 // Profile checkcast with null_seen and receiver. 2283 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2284 } else { 2285 // Profile checkcast with null_seen and receiver. 2286 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2287 } 2288 UPDATE_PC_AND_CONTINUE(3); 2289 2290 CASE(_instanceof): 2291 if (STACK_OBJECT(-1) == NULL) { 2292 SET_STACK_INT(0, -1); 2293 // Profile instanceof with null_seen and receiver. 2294 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2295 } else { 2296 VERIFY_OOP(STACK_OBJECT(-1)); 2297 u2 index = Bytes::get_Java_u2(pc+1); 2298 // Constant pool may have actual klass or unresolved klass. If it is 2299 // unresolved we must resolve it. 2300 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2301 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2302 } 2303 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2304 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2305 // 2306 // Check for compatibilty. This check must not GC!! 2307 // Seems way more expensive now that we must dispatch. 2308 // 2309 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2310 SET_STACK_INT(1, -1); 2311 } else { 2312 SET_STACK_INT(0, -1); 2313 // Decrement counter at checkcast. 2314 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2315 } 2316 // Profile instanceof with null_seen and receiver. 
2317 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2318 } 2319 UPDATE_PC_AND_CONTINUE(3); 2320 2321 CASE(_ldc_w): 2322 CASE(_ldc): 2323 { 2324 u2 index; 2325 bool wide = false; 2326 int incr = 2; // frequent case 2327 if (opcode == Bytecodes::_ldc) { 2328 index = pc[1]; 2329 } else { 2330 index = Bytes::get_Java_u2(pc+1); 2331 incr = 3; 2332 wide = true; 2333 } 2334 2335 ConstantPool* constants = METHOD->constants(); 2336 switch (constants->tag_at(index).value()) { 2337 case JVM_CONSTANT_Integer: 2338 SET_STACK_INT(constants->int_at(index), 0); 2339 break; 2340 2341 case JVM_CONSTANT_Float: 2342 SET_STACK_FLOAT(constants->float_at(index), 0); 2343 break; 2344 2345 case JVM_CONSTANT_String: 2346 { 2347 oop result = constants->resolved_references()->obj_at(index); 2348 if (result == NULL) { 2349 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2350 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2351 THREAD->set_vm_result(NULL); 2352 } else { 2353 VERIFY_OOP(result); 2354 SET_STACK_OBJECT(result, 0); 2355 } 2356 break; 2357 } 2358 2359 case JVM_CONSTANT_Class: 2360 case JVM_CONSTANT_Value: 2361 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2362 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2363 break; 2364 2365 case JVM_CONSTANT_UnresolvedClass: 2366 case JVM_CONSTANT_UnresolvedClassInError: 2367 case JVM_CONSTANT_UnresolvedValue: 2368 case JVM_CONSTANT_UnresolvedValueInError: 2369 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2370 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2371 THREAD->set_vm_result(NULL); 2372 break; 2373 2374 case JVM_CONSTANT_Dynamic: 2375 { 2376 oop result = constants->resolved_references()->obj_at(index); 2377 if (result == NULL) { 2378 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2379 result = THREAD->vm_result(); 2380 } 2381 VERIFY_OOP(result); 2382 2383 jvalue value; 2384 BasicType type = java_lang_boxing_object::get_value(result, &value); 2385 switch (type) { 2386 case T_FLOAT: SET_STACK_FLOAT(value.f, 0); break; 2387 case T_INT: SET_STACK_INT(value.i, 0); break; 2388 case T_SHORT: SET_STACK_INT(value.s, 0); break; 2389 case T_BYTE: SET_STACK_INT(value.b, 0); break; 2390 case T_CHAR: SET_STACK_INT(value.c, 0); break; 2391 case T_BOOLEAN: SET_STACK_INT(value.z, 0); break; 2392 default: ShouldNotReachHere(); 2393 } 2394 2395 break; 2396 } 2397 2398 default: ShouldNotReachHere(); 2399 } 2400 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2401 } 2402 2403 CASE(_ldc2_w): 2404 { 2405 u2 index = Bytes::get_Java_u2(pc+1); 2406 2407 ConstantPool* constants = METHOD->constants(); 2408 switch (constants->tag_at(index).value()) { 2409 2410 case JVM_CONSTANT_Long: 2411 SET_STACK_LONG(constants->long_at(index), 1); 2412 break; 2413 2414 case JVM_CONSTANT_Double: 2415 SET_STACK_DOUBLE(constants->double_at(index), 1); 2416 break; 2417 2418 case JVM_CONSTANT_Dynamic: 2419 { 2420 oop result = constants->resolved_references()->obj_at(index); 2421 if (result == NULL) { 2422 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2423 result = THREAD->vm_result(); 2424 } 2425 VERIFY_OOP(result); 2426 2427 jvalue value; 2428 BasicType type = java_lang_boxing_object::get_value(result, &value); 2429 switch (type) { 2430 case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break; 2431 case T_LONG: SET_STACK_LONG(value.j, 1); break; 2432 default: ShouldNotReachHere(); 2433 } 2434 2435 break; 2436 } 2437 
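// A JVM_CONSTANT_Dynamic entry reaching ldc2_w resolves to a boxed primitive;
// only the two-slot kinds are legal here, which is why the unboxing switch
// above has exactly the T_DOUBLE and T_LONG arms and everything else falls
// through to ShouldNotReachHere(). One-slot condy constants take the ldc /
// ldc_w path instead.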
2438 default: ShouldNotReachHere(); 2439 } 2440 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2441 } 2442 2443 CASE(_fast_aldc_w): 2444 CASE(_fast_aldc): { 2445 u2 index; 2446 int incr; 2447 if (opcode == Bytecodes::_fast_aldc) { 2448 index = pc[1]; 2449 incr = 2; 2450 } else { 2451 index = Bytes::get_native_u2(pc+1); 2452 incr = 3; 2453 } 2454 2455 // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.) 2456 // This kind of CP cache entry does not need to match the flags byte, because 2457 // there is a 1-1 relation between bytecode type and CP entry type. 2458 ConstantPool* constants = METHOD->constants(); 2459 oop result = constants->resolved_references()->obj_at(index); 2460 if (result == NULL) { 2461 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2462 handle_exception); 2463 result = THREAD->vm_result(); 2464 } 2465 if (result == Universe::the_null_sentinel()) 2466 result = NULL; 2467 2468 VERIFY_OOP(result); 2469 SET_STACK_OBJECT(result, 0); 2470 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2471 } 2472 2473 CASE(_invokedynamic): { 2474 2475 u4 index = Bytes::get_native_u4(pc+1); 2476 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2477 2478 // We are resolved if the resolved_references array contains a non-null object (CallSite, etc.) 2479 // This kind of CP cache entry does not need to match the flags byte, because 2480 // there is a 1-1 relation between bytecode type and CP entry type. 2481 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2482 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2483 handle_exception); 2484 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2485 } 2486 2487 Method* method = cache->f1_as_method(); 2488 if (VerifyOops) method->verify(); 2489 2490 if (cache->has_appendix()) { 2491 ConstantPool* constants = METHOD->constants(); 2492 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2493 MORE_STACK(1); 2494 } 2495 2496 istate->set_msg(call_method); 2497 istate->set_callee(method); 2498 istate->set_callee_entry_point(method->from_interpreted_entry()); 2499 istate->set_bcp_advance(5); 2500 2501 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2502 BI_PROFILE_UPDATE_CALL(); 2503 2504 UPDATE_PC_AND_RETURN(0); // I'll be back... 2505 } 2506 2507 CASE(_invokehandle): { 2508 2509 u2 index = Bytes::get_native_u2(pc+1); 2510 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2511 2512 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2513 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2514 handle_exception); 2515 cache = cp->entry_at(index); 2516 } 2517 2518 Method* method = cache->f1_as_method(); 2519 if (VerifyOops) method->verify(); 2520 2521 if (cache->has_appendix()) { 2522 ConstantPool* constants = METHOD->constants(); 2523 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2524 MORE_STACK(1); 2525 } 2526 2527 istate->set_msg(call_method); 2528 istate->set_callee(method); 2529 istate->set_callee_entry_point(method->from_interpreted_entry()); 2530 istate->set_bcp_advance(3); 2531 2532 // Invokehandle has got a call counter, just like a final call -> increment! 2533 BI_PROFILE_UPDATE_FINALCALL(); 2534 2535 UPDATE_PC_AND_RETURN(0); // I'll be back... 2536 } 2537 2538 CASE(_invokeinterface): { 2539 u2 index = Bytes::get_native_u2(pc+1); 2540 2541 // QQQ Need to make this as inlined as possible. 
Probably need to split all the bytecode cases 2542 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2543 2544 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2545 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2546 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2547 handle_exception); 2548 cache = cp->entry_at(index); 2549 } 2550 2551 istate->set_msg(call_method); 2552 2553 // Special case of invokeinterface called for virtual method of 2554 // java.lang.Object. See cpCacheOop.cpp for details. 2555 // This code isn't produced by javac, but could be produced by 2556 // another compliant java compiler. 2557 if (cache->is_forced_virtual()) { 2558 Method* callee; 2559 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2560 if (cache->is_vfinal()) { 2561 callee = cache->f2_as_vfinal_method(); 2562 // Profile 'special case of invokeinterface' final call. 2563 BI_PROFILE_UPDATE_FINALCALL(); 2564 } else { 2565 // Get receiver. 2566 int parms = cache->parameter_size(); 2567 // Same comments as invokevirtual apply here. 2568 oop rcvr = STACK_OBJECT(-parms); 2569 VERIFY_OOP(rcvr); 2570 Klass* rcvrKlass = rcvr->klass(); 2571 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2572 // Profile 'special case of invokeinterface' virtual call. 2573 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2574 } 2575 istate->set_callee(callee); 2576 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2577 #ifdef VM_JVMTI 2578 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2579 istate->set_callee_entry_point(callee->interpreter_entry()); 2580 } 2581 #endif /* VM_JVMTI */ 2582 istate->set_bcp_advance(5); 2583 UPDATE_PC_AND_RETURN(0); // I'll be back... 2584 } 2585 2586 // this could definitely be cleaned up QQQ 2587 Method* callee; 2588 Method *interface_method = cache->f2_as_interface_method(); 2589 InstanceKlass* iclass = interface_method->method_holder(); 2590 2591 // get receiver 2592 int parms = cache->parameter_size(); 2593 oop rcvr = STACK_OBJECT(-parms); 2594 CHECK_NULL(rcvr); 2595 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2596 2597 // Receiver subtype check against resolved interface klass (REFC). 2598 { 2599 Klass* refc = cache->f1_as_klass(); 2600 itableOffsetEntry* scan; 2601 for (scan = (itableOffsetEntry*) int2->start_of_itable(); 2602 scan->interface_klass() != NULL; 2603 scan++) { 2604 if (scan->interface_klass() == refc) { 2605 break; 2606 } 2607 } 2608 // Check that the entry is non-null. A null entry means 2609 // that the receiver class doesn't implement the 2610 // interface, and wasn't the same as when the caller was 2611 // compiled. 2612 if (scan->interface_klass() == NULL) { 2613 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2614 } 2615 } 2616 2617 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2618 int i; 2619 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2620 if (ki->interface_klass() == iclass) break; 2621 } 2622 // If the interface isn't found, this class doesn't implement this 2623 // interface. The link resolver checks this but only for the first 2624 // time this interface is called. 
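// Illustrative picture of the itable being walked (as used by this code): the
// receiver class int2 starts its itable with a row of itableOffsetEntry
// records, one per implemented interface, each pairing an interface Klass*
// with the location of that interface's block of itableMethodEntry slots.
// The loop above looked for the row whose interface_klass() is the declaring
// interface iclass; falling off the end means the receiver does not implement
// it, which the check below turns into an IncompatibleClassChangeError.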
2625 if (i == int2->itable_length()) { 2626 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2627 } 2628 int mindex = interface_method->itable_index(); 2629 2630 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2631 callee = im[mindex].method(); 2632 if (callee == NULL) { 2633 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2634 } 2635 2636 // Profile virtual call. 2637 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2638 2639 istate->set_callee(callee); 2640 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2641 #ifdef VM_JVMTI 2642 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2643 istate->set_callee_entry_point(callee->interpreter_entry()); 2644 } 2645 #endif /* VM_JVMTI */ 2646 istate->set_bcp_advance(5); 2647 UPDATE_PC_AND_RETURN(0); // I'll be back... 2648 } 2649 2650 CASE(_invokevirtual): 2651 CASE(_invokespecial): 2652 CASE(_invokestatic): { 2653 u2 index = Bytes::get_native_u2(pc+1); 2654 2655 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2656 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2657 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2658 2659 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2660 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2661 handle_exception); 2662 cache = cp->entry_at(index); 2663 } 2664 2665 istate->set_msg(call_method); 2666 { 2667 Method* callee; 2668 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2669 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2670 if (cache->is_vfinal()) { 2671 callee = cache->f2_as_vfinal_method(); 2672 // Profile final call. 2673 BI_PROFILE_UPDATE_FINALCALL(); 2674 } else { 2675 // get receiver 2676 int parms = cache->parameter_size(); 2677 // this works but needs a resourcemark and seems to create a vtable on every call: 2678 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2679 // 2680 // this fails with an assert 2681 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2682 // but this works 2683 oop rcvr = STACK_OBJECT(-parms); 2684 VERIFY_OOP(rcvr); 2685 Klass* rcvrKlass = rcvr->klass(); 2686 /* 2687 Executing this code in java.lang.String: 2688 public String(char value[]) { 2689 this.count = value.length; 2690 this.value = (char[])value.clone(); 2691 } 2692 2693 a find on rcvr->klass() reports: 2694 {type array char}{type array class} 2695 - klass: {other class} 2696 2697 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2698 because rcvr->klass()->is_instance_klass() == 0 2699 However it seems to have a vtable in the right location. Huh? 2700 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2701 */ 2702 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2703 // Profile virtual call. 2704 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2705 } 2706 } else { 2707 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2708 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2709 } 2710 callee = cache->f1_as_method(); 2711 2712 // Profile call. 
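// invokespecial and invokestatic bind to the statically resolved Method* in
// f1, so there is no receiver-based dispatch to profile; only the plain call
// count is bumped below (invokespecial still null-checked its receiver above,
// invokestatic has no receiver at all).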
2713 BI_PROFILE_UPDATE_CALL(); 2714 } 2715 2716 istate->set_callee(callee); 2717 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2718 #ifdef VM_JVMTI 2719 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2720 istate->set_callee_entry_point(callee->interpreter_entry()); 2721 } 2722 #endif /* VM_JVMTI */ 2723 istate->set_bcp_advance(3); 2724 UPDATE_PC_AND_RETURN(0); // I'll be back... 2725 } 2726 } 2727 2728 /* Allocate memory for a new java object. */ 2729 2730 CASE(_newarray): { 2731 BasicType atype = (BasicType) *(pc+1); 2732 jint size = STACK_INT(-1); 2733 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2734 handle_exception); 2735 // Must prevent reordering of stores for object initialization 2736 // with stores that publish the new object. 2737 OrderAccess::storestore(); 2738 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2739 THREAD->set_vm_result(NULL); 2740 2741 UPDATE_PC_AND_CONTINUE(2); 2742 } 2743 2744 /* Throw an exception. */ 2745 2746 CASE(_athrow): { 2747 oop except_oop = STACK_OBJECT(-1); 2748 CHECK_NULL(except_oop); 2749 // set pending_exception so we use common code 2750 THREAD->set_pending_exception(except_oop, NULL, 0); 2751 goto handle_exception; 2752 } 2753 2754 /* goto and jsr. They are exactly the same except jsr pushes 2755 * the address of the next instruction first. 2756 */ 2757 2758 CASE(_jsr): { 2759 /* push bytecode index on stack */ 2760 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2761 MORE_STACK(1); 2762 /* FALL THROUGH */ 2763 } 2764 2765 CASE(_goto): 2766 { 2767 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2768 // Profile jump. 2769 BI_PROFILE_UPDATE_JUMP(); 2770 address branch_pc = pc; 2771 UPDATE_PC(offset); 2772 DO_BACKEDGE_CHECKS(offset, branch_pc); 2773 CONTINUE; 2774 } 2775 2776 CASE(_jsr_w): { 2777 /* push return address on the stack */ 2778 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2779 MORE_STACK(1); 2780 /* FALL THROUGH */ 2781 } 2782 2783 CASE(_goto_w): 2784 { 2785 int32_t offset = Bytes::get_Java_u4(pc + 1); 2786 // Profile jump. 2787 BI_PROFILE_UPDATE_JUMP(); 2788 address branch_pc = pc; 2789 UPDATE_PC(offset); 2790 DO_BACKEDGE_CHECKS(offset, branch_pc); 2791 CONTINUE; 2792 } 2793 2794 /* return from a jsr or jsr_w */ 2795 2796 CASE(_ret): { 2797 // Profile ret. 2798 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2799 // Now, update the pc. 2800 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2801 UPDATE_PC_AND_CONTINUE(0); 2802 } 2803 2804 /* debugger breakpoint */ 2805 2806 CASE(_breakpoint): { 2807 Bytecodes::Code original_bytecode; 2808 DECACHE_STATE(); 2809 SET_LAST_JAVA_FRAME(); 2810 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2811 METHOD, pc); 2812 RESET_LAST_JAVA_FRAME(); 2813 CACHE_STATE(); 2814 if (THREAD->has_pending_exception()) goto handle_exception; 2815 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2816 handle_exception); 2817 2818 opcode = (jubyte)original_bytecode; 2819 goto opcode_switch; 2820 } 2821 2822 DEFAULT: 2823 fatal("Unimplemented opcode %d = %s", opcode, 2824 Bytecodes::name((Bytecodes::Code)opcode)); 2825 goto finish; 2826 2827 } /* switch(opc) */ 2828 2829 2830 #ifdef USELABELS 2831 check_for_exception: 2832 #endif 2833 { 2834 if (!THREAD->has_pending_exception()) { 2835 CONTINUE; 2836 } 2837 /* We will be gcsafe soon, so flush our state. 
*/ 2838 DECACHE_PC(); 2839 goto handle_exception; 2840 } 2841 do_continue: ; 2842 2843 } /* while (1) interpreter loop */ 2844 2845 2846 // An exception exists in the thread state see whether this activation can handle it 2847 handle_exception: { 2848 2849 HandleMarkCleaner __hmc(THREAD); 2850 Handle except_oop(THREAD, THREAD->pending_exception()); 2851 // Prevent any subsequent HandleMarkCleaner in the VM 2852 // from freeing the except_oop handle. 2853 HandleMark __hm(THREAD); 2854 2855 THREAD->clear_pending_exception(); 2856 assert(except_oop(), "No exception to process"); 2857 intptr_t continuation_bci; 2858 // expression stack is emptied 2859 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2860 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2861 handle_exception); 2862 2863 except_oop = Handle(THREAD, THREAD->vm_result()); 2864 THREAD->set_vm_result(NULL); 2865 if (continuation_bci >= 0) { 2866 // Place exception on top of stack 2867 SET_STACK_OBJECT(except_oop(), 0); 2868 MORE_STACK(1); 2869 pc = METHOD->code_base() + continuation_bci; 2870 if (log_is_enabled(Info, exceptions)) { 2871 ResourceMark rm(THREAD); 2872 stringStream tempst; 2873 tempst.print("interpreter method <%s>\n" 2874 " at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2875 METHOD->print_value_string(), 2876 (int)(istate->bcp() - METHOD->code_base()), 2877 (int)continuation_bci, p2i(THREAD)); 2878 Exceptions::log_exception(except_oop, tempst); 2879 } 2880 // for AbortVMOnException flag 2881 Exceptions::debug_check_abort(except_oop); 2882 2883 // Update profiling data. 2884 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2885 goto run; 2886 } 2887 if (log_is_enabled(Info, exceptions)) { 2888 ResourceMark rm; 2889 stringStream tempst; 2890 tempst.print("interpreter method <%s>\n" 2891 " at bci %d, unwinding for thread " INTPTR_FORMAT, 2892 METHOD->print_value_string(), 2893 (int)(istate->bcp() - METHOD->code_base()), 2894 p2i(THREAD)); 2895 Exceptions::log_exception(except_oop, tempst); 2896 } 2897 // for AbortVMOnException flag 2898 Exceptions::debug_check_abort(except_oop); 2899 2900 // No handler in this activation, unwind and try again 2901 THREAD->set_pending_exception(except_oop(), NULL, 0); 2902 goto handle_return; 2903 } // handle_exception: 2904 2905 // Return from an interpreter invocation with the result of the interpretation 2906 // on the top of the Java Stack (or a pending exception) 2907 2908 handle_Pop_Frame: { 2909 2910 // We don't really do anything special here except we must be aware 2911 // that we can get here without ever locking the method (if sync). 2912 // Also we skip the notification of the exit. 2913 2914 istate->set_msg(popping_frame); 2915 // Clear pending so while the pop is in process 2916 // we don't start another one if a call_vm is done. 2917 THREAD->clr_pop_frame_pending(); 2918 // Let interpreter (only) see the we're in the process of popping a frame 2919 THREAD->set_pop_frame_in_process(); 2920 2921 goto handle_return; 2922 2923 } // handle_Pop_Frame 2924 2925 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2926 // given by the invoker of the early return. 2927 handle_Early_Return: { 2928 2929 istate->set_msg(early_return); 2930 2931 // Clear expression stack. 2932 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2933 2934 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2935 2936 // Push the value to be returned. 
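// This block services JVMTI ForceEarlyReturn<Type>: an agent parks the forced
// value in the JvmtiThreadState and the interpreter materializes it here in
// place of the method's own result. A minimal, hypothetical agent-side sketch
// (assuming a valid jvmtiEnv* jvmti and a suspended jthread thread):
//
//   jvmtiError err = jvmti->ForceEarlyReturnInt(thread, 42);
//   // at the next return point this switch pushes 42 via SET_STACK_INT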
2937 switch (istate->method()->result_type()) { 2938 case T_BOOLEAN: 2939 case T_SHORT: 2940 case T_BYTE: 2941 case T_CHAR: 2942 case T_INT: 2943 SET_STACK_INT(ts->earlyret_value().i, 0); 2944 MORE_STACK(1); 2945 break; 2946 case T_LONG: 2947 SET_STACK_LONG(ts->earlyret_value().j, 1); 2948 MORE_STACK(2); 2949 break; 2950 case T_FLOAT: 2951 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2952 MORE_STACK(1); 2953 break; 2954 case T_DOUBLE: 2955 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2956 MORE_STACK(2); 2957 break; 2958 case T_ARRAY: 2959 case T_OBJECT: 2960 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2961 MORE_STACK(1); 2962 break; 2963 } 2964 2965 ts->clr_earlyret_value(); 2966 ts->set_earlyret_oop(NULL); 2967 ts->clr_earlyret_pending(); 2968 2969 // Fall through to handle_return. 2970 2971 } // handle_Early_Return 2972 2973 handle_return: { 2974 // A storestore barrier is required to order initialization of 2975 // final fields with publishing the reference to the object that 2976 // holds the field. Without the barrier the value of final fields 2977 // can be observed to change. 2978 OrderAccess::storestore(); 2979 2980 DECACHE_STATE(); 2981 2982 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2983 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2984 Handle original_exception(THREAD, THREAD->pending_exception()); 2985 Handle illegal_state_oop(THREAD, NULL); 2986 2987 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2988 // in any following VM entries from freeing our live handles, but illegal_state_oop 2989 // isn't really allocated yet and so doesn't become live until later and 2990 // in unpredicatable places. Instead we must protect the places where we enter the 2991 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2992 // a NULL oop in it and then overwrite the oop later as needed. This isn't 2993 // unfortunately isn't possible. 2994 2995 THREAD->clear_pending_exception(); 2996 2997 // 2998 // As far as we are concerned we have returned. If we have a pending exception 2999 // that will be returned as this invocation's result. However if we get any 3000 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 3001 // will be our final result (i.e. monitor exception trumps a pending exception). 3002 // 3003 3004 // If we never locked the method (or really passed the point where we would have), 3005 // there is no need to unlock it (or look for other monitors), since that 3006 // could not have happened. 3007 3008 if (THREAD->do_not_unlock()) { 3009 3010 // Never locked, reset the flag now because obviously any caller must 3011 // have passed their point of locking for us to have gotten here. 3012 3013 THREAD->clr_do_not_unlock(); 3014 } else { 3015 // At this point we consider that we have returned. We now check that the 3016 // locks were properly block structured. If we find that they were not 3017 // used properly we will return with an illegal monitor exception. 3018 // The exception is checked by the caller not the callee since this 3019 // checking is considered to be part of the invocation and therefore 3020 // in the callers scope (JVM spec 8.13). 3021 // 3022 // Another weird thing to watch for is if the method was locked 3023 // recursively and then not exited properly. This means we must 3024 // examine all the entries in reverse time(and stack) order and 3025 // unlock as we find them. 
If we find the method monitor before 3026 // we are at the initial entry then we should throw an exception. 3027 // It is not clear the template based interpreter does this 3028 // correctly 3029 3030 BasicObjectLock* base = istate->monitor_base(); 3031 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 3032 bool method_unlock_needed = METHOD->is_synchronized(); 3033 // We know the initial monitor was used for the method don't check that 3034 // slot in the loop 3035 if (method_unlock_needed) base--; 3036 3037 // Check all the monitors to see they are unlocked. Install exception if found to be locked. 3038 while (end < base) { 3039 oop lockee = end->obj(); 3040 if (lockee != NULL) { 3041 BasicLock* lock = end->lock(); 3042 markOop header = lock->displaced_header(); 3043 end->set_obj(NULL); 3044 3045 if (!lockee->mark()->has_bias_pattern()) { 3046 // If it isn't recursive we either must swap old header or call the runtime 3047 if (header != NULL) { 3048 markOop old_header = markOopDesc::encode(lock); 3049 if (lockee->cas_set_mark(header, old_header) != old_header) { 3050 // restore object for the slow case 3051 end->set_obj(lockee); 3052 { 3053 // Prevent any HandleMarkCleaner from freeing our live handles 3054 HandleMark __hm(THREAD); 3055 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 3056 } 3057 } 3058 } 3059 } 3060 // One error is plenty 3061 if (illegal_state_oop() == NULL && !suppress_error) { 3062 { 3063 // Prevent any HandleMarkCleaner from freeing our live handles 3064 HandleMark __hm(THREAD); 3065 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3066 } 3067 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3068 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3069 THREAD->clear_pending_exception(); 3070 } 3071 } 3072 end++; 3073 } 3074 // Unlock the method if needed 3075 if (method_unlock_needed) { 3076 if (base->obj() == NULL) { 3077 // The method is already unlocked this is not good. 3078 if (illegal_state_oop() == NULL && !suppress_error) { 3079 { 3080 // Prevent any HandleMarkCleaner from freeing our live handles 3081 HandleMark __hm(THREAD); 3082 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3083 } 3084 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3085 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3086 THREAD->clear_pending_exception(); 3087 } 3088 } else { 3089 // 3090 // The initial monitor is always used for the method 3091 // However if that slot is no longer the oop for the method it was unlocked 3092 // and reused by something that wasn't unlocked! 3093 // 3094 // deopt can come in with rcvr dead because c2 knows 3095 // its value is preserved in the monitor. So we can't use locals[0] at all 3096 // and must use first monitor slot. 3097 // 3098 oop rcvr = base->obj(); 3099 if (rcvr == NULL) { 3100 if (!suppress_error) { 3101 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3102 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3103 THREAD->clear_pending_exception(); 3104 } 3105 } else if (UseHeavyMonitors) { 3106 { 3107 // Prevent any HandleMarkCleaner from freeing our live handles. 
3108 HandleMark __hm(THREAD); 3109 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3110 } 3111 if (THREAD->has_pending_exception()) { 3112 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3113 THREAD->clear_pending_exception(); 3114 } 3115 } else { 3116 BasicLock* lock = base->lock(); 3117 markOop header = lock->displaced_header(); 3118 base->set_obj(NULL); 3119 3120 if (!rcvr->mark()->has_bias_pattern()) { 3121 base->set_obj(NULL); 3122 // If it isn't recursive we either must swap old header or call the runtime 3123 if (header != NULL) { 3124 markOop old_header = markOopDesc::encode(lock); 3125 if (rcvr->cas_set_mark(header, old_header) != old_header) { 3126 // restore object for the slow case 3127 base->set_obj(rcvr); 3128 { 3129 // Prevent any HandleMarkCleaner from freeing our live handles 3130 HandleMark __hm(THREAD); 3131 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3132 } 3133 if (THREAD->has_pending_exception()) { 3134 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3135 THREAD->clear_pending_exception(); 3136 } 3137 } 3138 } 3139 } 3140 } 3141 } 3142 } 3143 } 3144 // Clear the do_not_unlock flag now. 3145 THREAD->clr_do_not_unlock(); 3146 3147 // 3148 // Notify jvmti/jvmdi 3149 // 3150 // NOTE: we do not notify a method_exit if we have a pending exception, 3151 // including an exception we generate for unlocking checks. In the former 3152 // case, JVMDI has already been notified by our call for the exception handler 3153 // and in both cases as far as JVMDI is concerned we have already returned. 3154 // If we notify it again JVMDI will be all confused about how many frames 3155 // are still on the stack (4340444). 3156 // 3157 // NOTE Further! It turns out the the JVMTI spec in fact expects to see 3158 // method_exit events whenever we leave an activation unless it was done 3159 // for popframe. This is nothing like jvmdi. However we are passing the 3160 // tests at the moment (apparently because they are jvmdi based) so rather 3161 // than change this code and possibly fail tests we will leave it alone 3162 // (with this note) in anticipation of changing the vm and the tests 3163 // simultaneously. 3164 3165 3166 // 3167 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3168 3169 3170 3171 #ifdef VM_JVMTI 3172 if (_jvmti_interp_events) { 3173 // Whenever JVMTI puts a thread in interp_only_mode, method 3174 // entry/exit events are sent for that thread to track stack depth. 3175 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3176 { 3177 // Prevent any HandleMarkCleaner from freeing our live handles 3178 HandleMark __hm(THREAD); 3179 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3180 } 3181 } 3182 } 3183 #endif /* VM_JVMTI */ 3184 3185 // 3186 // See if we are returning any exception 3187 // A pending exception that was pending prior to a possible popping frame 3188 // overrides the popping frame. 3189 // 3190 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3191 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3192 // Inform the frame manager we have no result. 
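// The loop talks to the frame manager through istate->msg(); the values set
// in this function mean, roughly: call_method (build a callee frame),
// more_monitors (grow the monitor area and re-execute), popping_frame (JVMTI
// PopFrame), early_return (JVMTI ForceEarlyReturn), throwing_exception
// (unwind with a pending exception) and return_from_method (normal return).
// Here the activation gives up with an exception, so the frame manager must
// not expect a result: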
      istate->set_msg(throwing_exception);
      if (illegal_state_oop() != NULL)
        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
      else
        THREAD->set_pending_exception(original_exception(), NULL, 0);
      UPDATE_PC_AND_RETURN(0);
    }

    if (istate->msg() == popping_frame) {
      // Make it simpler on the assembly code and set the message for the frame pop.
      // returns
      if (istate->prev() == NULL) {
        // We must be returning to a deoptimized frame (because popframe only happens between
        // two interpreted frames). We need to save the current arguments in C heap so that
        // the deoptimized frame when it restarts can copy the arguments to its expression
        // stack and re-execute the call. We also have to notify deoptimization that this
        // has occurred and to pick up the preserved args and copy them to the deoptimized
        // frame's java expression stack. Yuck.
        //
        THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
                                       LOCALS_SLOT(METHOD->size_of_parameters() - 1));
        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
      }
    } else {
      istate->set_msg(return_from_method);
    }

    // Normal return
    // Advance the pc and return to frame manager
    UPDATE_PC_AND_RETURN(1);
  } /* handle_return: */

// This is really a fatal error return

finish:
  DECACHE_TOS();
  DECACHE_PC();

  return;
}

/*
 * All the code following this point is only produced once and is not present
 * in the JVMTI version of the interpreter
 */

#ifndef VM_JVMTI

// This constructor should only be used to construct the object to signal
// interpreter initialization. All other instances should be created by
// the frame manager.
BytecodeInterpreter::BytecodeInterpreter(messages msg) {
  if (msg != initialize) ShouldNotReachHere();
  _msg = msg;
  _self_link = this;
  _prev_link = NULL;
}

// Inline static functions for Java Stack and Local manipulation

// The implementations are platform dependent. We have to worry about alignment
// issues on some machines which can change on the same platform depending on
// whether it is an LP64 machine also.
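// A small usage sketch of the accessors defined below; 'tos' stands for the
// cached expression stack top (what the interpreter loop calls topOfStack),
// and offsets count slots relative to it, mirroring the STACK_* macros used
// in the loop above:
//
//   jint a = BytecodeInterpreter::stack_int(tos, -1);    // top slot
//   jint b = BytecodeInterpreter::stack_int(tos, -2);    // slot below it
//   BytecodeInterpreter::set_stack_int(tos, a + b, -2);  // write result, much as iadd would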
3256 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3257 return (address) tos[Interpreter::expr_index_at(-offset)]; 3258 } 3259 3260 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3261 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3262 } 3263 3264 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3265 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3266 } 3267 3268 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3269 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3270 } 3271 3272 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3273 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3274 } 3275 3276 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3277 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3278 } 3279 3280 // only used for value types 3281 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3282 int offset) { 3283 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3284 } 3285 3286 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3287 int offset) { 3288 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3289 } 3290 3291 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3292 int offset) { 3293 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3294 } 3295 3296 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3297 int offset) { 3298 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3299 } 3300 3301 // needs to be platform dep for the 32 bit platforms. 3302 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3303 int offset) { 3304 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3305 } 3306 3307 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3308 address addr, int offset) { 3309 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3310 ((VMJavaVal64*)addr)->d); 3311 } 3312 3313 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3314 int offset) { 3315 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3316 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3317 } 3318 3319 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3320 address addr, int offset) { 3321 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3322 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3323 ((VMJavaVal64*)addr)->l; 3324 } 3325 3326 // Locals 3327 3328 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3329 return (address)locals[Interpreter::local_index_at(-offset)]; 3330 } 3331 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3332 return (jint)locals[Interpreter::local_index_at(-offset)]; 3333 } 3334 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3335 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3336 } 3337 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3338 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3339 } 3340 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3341 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3342 } 3343 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3344 return 
// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  return (jfloat)locals[Interpreter::local_index_at(-offset)];
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}
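/*
 * Locals are addressed the same way, but relative to the 'locals' pointer and
 * through the platform-defined Interpreter::local_index_at() mapping. As in
 * the JVM specification, a long or double local occupies a pair of slots,
 * which is why the 64-bit variants index with -(offset + 1); a rough round
 * trip for local slot n is
 *
 *   set_locals_long(locals, value, n);
 *   jlong v = locals_long(locals, n);     // v == value
 */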
void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
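/*
 * Stack transitions implemented by the helpers above, in the JVM
 * specification's notation with the rightmost entry on top (sketch):
 *
 *   dup     : ..., a          -> ..., a, a
 *   dup_x1  : ..., b, a       -> ..., a, b, a
 *   dup_x2  : ..., c, b, a    -> ..., a, c, b, a
 *   dup2    : ..., b, a       -> ..., b, a, b, a
 *   dup2_x1 : ..., c, b, a    -> ..., b, a, c, b, a
 *   dup2_x2 : ..., d, c, b, a -> ..., b, a, d, c, b, a
 *   swap    : ..., b, a       -> ..., a, b
 *
 * The helpers write the duplicated words into the free slots at and above
 * offset 0; the interpreter loop then advances the top-of-stack pointer.
 */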
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]",  (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Callable from a native debugger to dump the interpreter state at the
// given address.
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP