/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step through bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif
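/*
 * Illustrative sketch (not part of this file): with USELABELS defined,
 * dispatch compiles down to gcc's computed goto, roughly
 *
 *   static const void* table[256] = { &&opc_nop, &&opc_aconst_null, ... };
 *   goto *table[*pc];                   // jump straight to the handler
 *   opc_nop: pc += 1; goto *table[*pc];
 *
 * whereas the portable form is the usual
 *
 *   while (1) { switch (*pc) { case Bytecodes::_nop: ... } }
 *
 * Each handler ends with its own dispatch, so the next-opcode computation
 * can be scheduled alongside the handler body (see the CONTINUE and
 * UPDATE_PC_AND_TOS_AND_CONTINUE macros below).
 */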
/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  live handles of its own creation at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                       \
    if ( SafepointSynchronize::is_synchronizing()) {                    \
        {                                                               \
          /* zap freed handles rather than GC'ing them */               \
          HandleMarkCleaner __hmc(THREAD);                              \
        }                                                               \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to the VM so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                        \
    DECACHE_STATE();                                                         \
    SET_LAST_JAVA_FRAME();                                                   \
    {                                                                        \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());     \
       ThreadInVMfromJava trans(THREAD);                                     \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);        \
    }                                                                        \
    RESET_LAST_JAVA_FRAME();                                                 \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap)                                \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                            \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                                  topOfStack[Interpreter::expr_index_at(1)],         \
                                                  topOfStack[Interpreter::expr_index_at(2)]),        \
                                                  handle_exception);                                 \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the
   opcode afterward to get the current opcode. This will override any
   other prefetching that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                \
{                                                                    \
      if (_jvmti_interp_events) {                                    \
        if (JvmtiExport::should_post_single_step()) {                \
          DECACHE_STATE();                                           \
          SET_LAST_JAVA_FRAME();                                     \
          ThreadInVMfromJava trans(THREAD);                          \
          JvmtiExport::at_single_stepping_point(THREAD,              \
                                          istate->method(),          \
                                          pc);                       \
          RESET_LAST_JAVA_FRAME();                                   \
          CACHE_STATE();                                             \
          if (THREAD->pop_frame_pending() &&                         \
              !THREAD->pop_frame_in_process()) {                     \
            goto handle_Pop_Frame;                                   \
          }                                                          \
          if (THREAD->jvmti_thread_state() &&                        \
              THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
            goto handle_Early_Return;                                \
          }                                                          \
          opcode = *pc;                                              \
        }                                                            \
      }                                                              \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
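// Note: every transition into the VM here follows the same handshake that
// VM_JAVA_ERROR_NO_JUMP and DEBUGGER_SINGLE_STEP_NOTIFY spell out by hand:
// flush the cached interpreter state (DECACHE_STATE), publish a walkable
// last Java frame (SET_LAST_JAVA_FRAME), make the call as ThreadInVMfromJava,
// then undo both and CACHE_STATE(), because a GC during the call may have
// moved the objects the cached pointers refer to.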
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                                                \
  res = METHOD->method_counters();                                                              \
  if (res == NULL) {                                                                            \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
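// Note: "skip" is a signed branch offset, so (skip) <= 0 in
// DO_BACKEDGE_CHECKS below identifies a backward branch (e.g. the branch
// that closes a loop). Backward branches are the only places the interpreter
// bumps the backedge counter, considers an OSR request, and polls SAFEPOINT.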
#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
    if ((skip) <= 0) {                                                                              \
      MethodCounters* mcs;                                                                          \
      GET_METHOD_COUNTERS(mcs);                                                                     \
      if (UseLoopCounter) {                                                                         \
        bool do_OSR = UseOnStackReplacement;                                                        \
        mcs->backedge_counter()->increment();                                                       \
        if (ProfileInterpreter) {                                                                   \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                                   \
          /* Check for overflow against MDO count. */                                               \
          do_OSR = do_OSR                                                                           \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes     */                       \
            /* from the methodDataOop, which value does not get reset on   */                       \
            /* the call to frequency_counter_overflow(). To avoid          */                       \
            /* excessive calls to the overflow routine while the method is */                       \
            /* being compiled, add a second test to make sure the overflow */                       \
            /* function is called only once every overflow_frequency.      */                       \
            && (!(mdo_last_branch_taken_count & 1023));                                             \
        } else {                                                                                    \
          /* check for overflow of backedge counter */                                              \
          do_OSR = do_OSR                                                                           \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter());         \
        }                                                                                           \
        if (do_OSR) {                                                                               \
          nmethod* osr_nmethod;                                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
            intptr_t* buf;                                                                          \
            /* Call OSR migration with last java frame only, no checks. */                          \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));                      \
            istate->set_msg(do_osr);                                                                \
            istate->set_osr_buf((address)buf);                                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
            return;                                                                                 \
          }                                                                                         \
        }                                                                                           \
      }  /* UseCompiler ... */                                                                      \
      SAFEPOINT;                                                                                    \
    }

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming GC safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                                         \
        if ((obj_) == NULL) {                                                                    \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        }                                                                                        \
        VERIFY_OOP(obj_)
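// Note: compiled code can usually elide such an explicit test and take the
// implicit route mentioned above (a trap on dereferencing a null oop); the
// interpreter keeps the portable explicit branch so the NullPointerException
// and the note_nullCheck_trap bookkeeping happen uniformly on all platforms.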
#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that the interpreter modifies directly (i.e. GC is an indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func)                                      \
        CALL_VM_NAKED_LJF(func)                                    \
        if (THREAD->pop_frame_pending() &&                         \
            !THREAD->pop_frame_in_process()) {                     \
          goto handle_Pop_Frame;                                   \
        }                                                          \
        if (THREAD->jvmti_thread_state() &&                        \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return;                                \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                     \
          CALL_VM_NOCHECK(func);                                   \
          if (THREAD->has_pending_exception()) goto label;         \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks variant is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
    // because in that case, EnableInvokeDynamic is true by default but will be later switched off
    // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
    // for the old JSR292 implementation.
    // This leads to a situation where 'istate->_stack_limit' always accounts for
    // methodOopDesc::extra_stack_entries() because it is computed in
    // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
    // EnableInvokeDynamic was still true.
    // On the other hand, istate->_method->max_stack() doesn't
    // account for extra_stack_entries() anymore because at the time when it is called
    // EnableInvokeDynamic was already set to false.
    // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
    // switched off because of the wrong classes.
    if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    } else {
      const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
                                                                                               + 1), "bad stack limit");
    }
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate;
  // save a copy so we can verify it.
  interpreterState orig = istate;
#endif

  register intptr_t*          topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address            pc = istate->bcp();
  register jubyte             opcode;
  register intptr_t*          locals = istate->locals();
  register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*        THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif

#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,

/* 0xE0 */ &&opc_default,      &&opc_default,   &&opc_default,     &&opc_default,
/* 0xE4 */ &&opc_default,      &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
/* 0xE8 */ &&opc_invokehandle, &&opc_default,   &&opc_default,     &&opc_default,
/* 0xEC */ &&opc_default,      &&opc_default,   &&opc_default,     &&opc_default,

/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */

#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
         topOfStack >= istate->stack_limit() &&
         topOfStack < istate->stack_base(),
         "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack),
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK

      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);
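          // Note: the XOR compares the mark word against the ideally-biased
          // header for this thread (the prototype header, which carries the
          // bias pattern and current epoch, OR'ed with this thread's
          // identity), ignoring the age bits. Zero means "already biased to
          // us"; the branches below decode which field disagreed.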
          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }

        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
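      // Note: the lightweight path above parks the unlocked mark word in the
      // BasicLock's displaced header and CASes the object's mark to point at
      // that stack slot. If the CAS fails but the mark already points into
      // this thread's stack, it is a recursive enter and is recorded with a
      // NULL displaced header; everything else is handed to
      // InterpreterRuntime::monitorenter() for inflation/contention handling.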
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack),
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result, so all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
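      // Note: the code below repeats the method_entry biased- and
      // lightweight-locking fast paths, this time for an explicit
      // monitorenter whose monitor slot the frame manager has just
      // allocated; the object to lock is still on the expression stack.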
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke an implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                     \
          SET_STACK_ ## const_type(value, 0);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                 \
      {                                                                 \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      }
         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
         OPC_CONST2_n(lconst_0, Zero, long, LONG);
         OPC_CONST2_n(lconst_1, One,  long, LONG);

         /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
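          /* Note: the STACK_* / SET_STACK_* macros address slots relative to
             topOfStack, so SET_STACK_INT(v, 0) fills the slot that the
             following UPDATE_PC_AND_TOS_AND_CONTINUE(..., 1) makes live;
             category-2 values (long, double) occupy two slots, hence the
             (value, 1) / tos += 2 pattern in OPC_CONST2_n above. */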
          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_aload_##num):                                               \
          VERIFY_OOP(LOCALS_OBJECT(num));                               \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_iload_##num):                                               \
      CASE(_fload_##num):                                               \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_lload_##num):                                               \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      CASE(_dload_##num):                                               \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // Wide and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
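              // Note: wide widens the local-variable index to the two bytes
              // at pc+2, so the forms above advance the pc by 4; wide iinc
              // below additionally carries a two-byte signed increment at
              // pc+4 and advances by 6.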
              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):               /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):    /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):    /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):   /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):   /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {        /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap);          \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap);     \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machines/platforms/compilers the div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                                  \
      CASE(_d##opcname): {                                                 \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
                                            STACK_DOUBLE(-1)),             \
                                            -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
      }                                                                    \
      CASE(_f##opcname):                                                   \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
                                          STACK_FLOAT(-1)),                \
                                          -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


     OPC_FLOAT_BINARY(add, Add);
     OPC_FLOAT_BINARY(sub, Sub);
     OPC_FLOAT_BINARY(mul, Mul);
     OPC_FLOAT_BINARY(div, Div);
     OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

     /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

     /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }
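      /* Note: the category-2 conversions here all follow one pattern: pop
         the operand (MORE_STACK(-1) or -2), write the result at the offset
         that the final UPDATE_PC_AND_TOS_AND_CONTINUE turns into the new top
         of stack, then adjust the stack by the result's slot count. */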
      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong??)
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */


#define COMPARISON_OP(name, comparison)                                      \
      CASE(_if_icmp##name): {                                                \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1));         \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }                                                                      \
      CASE(_if##name): {                                                     \
          const bool cmp = (STACK_INT(-1) comparison 0);                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
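      // Note: "skip" is either the signed 16-bit branch offset (taken) or 3,
      // the length of the if<cond> bytecode itself (fall through); both are
      // applied to the pre-branch pc, which is what lets DO_BACKEDGE_CHECKS
      // recognize a backward branch by skip <= 0.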
#define COMPARISON_OP2(name, comparison)                                     \
      COMPARISON_OP(name, comparison)                                        \
      CASE(_if_acmp##name): {                                                \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1));   \
          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_NOT_OP(name)                                         \
      CASE(_if##name): {                                                     \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL));                    \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_OP(name)                                             \
      CASE(_if##name): {                                                     \
          const bool cmp = ((STACK_OBJECT(-1) == NULL));                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
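      // Note: the "l" and "g" comparison flavors differ only for unordered
      // (NaN) operands; the direction argument below (-1 for fcmpl/dcmpl,
      // +1 for fcmpg/dcmpg) supplies the result required in that case.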
      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint     index  = STACK_INT(arrayOff + 1);                               \
      char message[jintAsStringSize];                                          \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          sprintf(message, "%d", index);                                       \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message, note_rangeCheck_trap);                        \
      }

      /* 32-bit loads. These handle conversion from < 32-bit types */
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
      {                                                                               \
          ARRAY_INTRO(-2);                                                            \
          (void)extra;                                                                \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2);                                                       \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
      }

      /* 64-bit loads */
#define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
      {                                                                                    \
          ARRAY_INTRO(-2);                                                                 \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          (void)extra;                                                                     \
          UPDATE_PC_AND_CONTINUE(1);                                                       \
      }

      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      CASE(_aaload): {
          ARRAY_INTRO(-2);
          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
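      // Note: ARRAY_INTRO above folds together the two checks every array
      // bytecode needs: CHECK_NULL on the array oop, then a single unsigned
      // compare (uint32_t)index >= (uint32_t)length, which rejects negative
      // and too-large indices in one test.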
These handle conversion to < 32-bit types */
#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
{                                                                                    \
    ARRAY_INTRO(-3);                                                                 \
    (void)extra;                                                                     \
    *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc(-1);        \
    UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                           \
}

/* 64-bit stores */
#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
{                                                                                    \
    ARRAY_INTRO(-4);                                                                 \
    (void)extra;                                                                     \
    *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc(-1);        \
    UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                           \
}

      CASE(_iastore):
          ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_fastore):
          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      /*
       * This one looks different because of the assignability check
       */
      CASE(_aastore): {
          oop rhsObject = STACK_OBJECT(-1);
          VERIFY_OOP(rhsObject);
          ARRAY_INTRO(-3);
          // arrObj, index are set
          if (rhsObject != NULL) {
            /* Check assignability of rhsObject into arrObj */
            Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
            Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
            //
            // Check for compatibility. This check must not GC!!
            // It seems much more expensive now that we must dispatch.
            //
            if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
              // Decrement counter if subtype check failed.
              BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
            }
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
          } else {
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
          }
          ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore):
          ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_castore):
          ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_sastore):
          ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_lastore):
          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_dastore):
          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      CASE(_arraylength):
      {
          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
          CHECK_NULL(ary);
          SET_STACK_INT(ary->length(), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* monitorenter and monitorexit for locking/unlocking an object */

      CASE(_monitorenter): {
        oop lockee = STACK_OBJECT(-1);
        // dereferencing lockee ought to provoke an implicit null check
        CHECK_NULL(lockee);
        // Find a free monitor, or one already allocated for this object.
        // If we find a matching object we still need a new monitor,
        // since this is a recursive enter.
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        BasicObjectLock* entry = NULL;
        while (most_recent != limit) {
          if (most_recent->obj() == NULL) entry = most_recent;
          else if (most_recent->obj() == lockee) break;
          most_recent++;
        }
        if (entry != NULL) {
          entry->set_obj(lockee);
          int success = false;
          uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

          markOop mark = lockee->mark();
          intptr_t hash = (intptr_t) markOopDesc::no_hash;
          // implies UseBiasedLocking
          if (mark->has_bias_pattern()) {
            uintptr_t thread_ident;
            uintptr_t anticipated_bias_locking_value;
            thread_ident = (uintptr_t)istate->thread();
            anticipated_bias_locking_value =
              (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
              ~((uintptr_t) markOopDesc::age_mask_in_place);

            if (anticipated_bias_locking_value == 0) {
              // already biased towards this thread, nothing to do
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::biased_lock_entry_count_addr())++;
              }
              success = true;
            }
            else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
              // try to revoke the bias
              markOop header = lockee->klass()->prototype_header();
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
              }
            }
            else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
              // try to rebias
              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
              if (hash != markOopDesc::no_hash) {
                new_header = new_header->copy_set_hash(hash);
              }
              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
              else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
              }
              success = true;
            }
            else {
              // try to bias towards thread in case object is anonymously biased
              markOop header = (markOop) ((uintptr_t) mark &
                                          ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                           (uintptr_t)markOopDesc::age_mask_in_place |
                                           epoch_mask_in_place));
              if (hash != markOopDesc::no_hash) {
                header = header->copy_set_hash(hash);
              }
              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
              // debugging hint
              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics)
                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
              else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
              }
              success = true;
            }
          }

          // traditional lightweight locking
          if (!success) {
            markOop displaced = lockee->mark()->set_unlocked();
            entry->lock()->set_displaced_header(displaced);
            bool call_vm = UseHeavyMonitors;
            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
              // Is it the simple recursive case?
              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                entry->lock()->set_displaced_header(NULL);
              } else {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
              }
            }
          }
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
        } else {
          istate->set_msg(more_monitors);
          UPDATE_PC_AND_RETURN(0); // Re-execute
        }
      }

      CASE(_monitorexit): {
        oop lockee = STACK_OBJECT(-1);
        CHECK_NULL(lockee);
        // dereferencing lockee ought to provoke an implicit null check
        // find our monitor slot
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        while (most_recent != limit) {
          if ((most_recent)->obj() == lockee) {
            BasicLock* lock = most_recent->lock();
            markOop header = lock->displaced_header();
            most_recent->set_obj(NULL);
            if (!lockee->mark()->has_bias_pattern()) {
              bool call_vm = UseHeavyMonitors;
              // If it isn't recursive, we must either swap the old header or call the runtime
              if (header != NULL || call_vm) {
                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  most_recent->set_obj(lockee);
                  CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
                }
              }
            }
            UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
          }
          most_recent++;
        }
        // Need to throw an illegal monitor state exception
        CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
        ShouldNotReachHere();
      }

      /* All of the non-quick opcodes. */

      /* Set clobbersCpIndex true if the quickened opcode clobbers the
       * constant pool index in the instruction.
       */
      CASE(_getfield):
      CASE(_getstatic):
        {
          u2 index;
          ConstantPoolCacheEntry* cache;
          index = Bytes::get_native_u2(pc+1);

          // QQQ Need to make this as inlined as possible. Probably need to
          // split all the bytecode cases out so the C++ compiler has a chance
          // for constant propagation to fold everything possible away.
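
          // A rough sketch of the lazy-resolution pattern used here and by
          // the invoke bytecodes below (illustrative only): the first
          // execution of a given field-access bytecode finds its constant
          // pool cache entry unresolved, transitions into the VM to resolve
          // it, and then re-reads the entry (the CALL_VM may have
          // safepointed, and only the resolved entry carries the field
          // offset and TosState):
          //
          //   cache = cp->entry_at(index);            // fast path: cached entry
          //   if (!cache->is_resolved(opcode)) {      // slow path, taken once
          //     CALL_VM(resolve, handle_exception);   // fills in the entry
          //     cache = cp->entry_at(index);          // re-read resolved entry
          //   }
          //
          // Every later execution of that bytecode takes only the fast path.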

          cache = cp->entry_at(index);
          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
                    handle_exception);
            cache = cp->entry_at(index);
          }

#ifdef VM_JVMTI
          if (_jvmti_interp_events) {
            int *count_addr;
            oop obj;
            // Check to see if a field access watch has been set
            // before we take the time to call into the VM.
            count_addr = (int *)JvmtiExport::get_field_access_count_addr();
            if ( *count_addr > 0 ) {
              if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
                obj = (oop)NULL;
              } else {
                obj = (oop) STACK_OBJECT(-1);
                VERIFY_OOP(obj);
              }
              CALL_VM(InterpreterRuntime::post_field_access(THREAD,
                                                            obj,
                                                            cache),
                                                            handle_exception);
            }
          }
#endif /* VM_JVMTI */

          oop obj;
          if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
            Klass* k = cache->f1_as_klass();
            obj = k->java_mirror();
            MORE_STACK(1);  // Assume single slot push
          } else {
            obj = (oop) STACK_OBJECT(-1);
            CHECK_NULL(obj);
          }

          //
          // Now store the result on the stack
          //
          TosState tos_type = cache->flag_state();
          int field_offset = cache->f2_as_index();
          if (cache->is_volatile()) {
            if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
              OrderAccess::fence();
            }
            if (tos_type == atos) {
              VERIFY_OOP(obj->obj_field_acquire(field_offset));
              SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
            } else if (tos_type == itos) {
              SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
            } else if (tos_type == ltos) {
              SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
              MORE_STACK(1);
            } else if (tos_type == btos) {
              SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
            } else if (tos_type == ctos) {
              SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
            } else if (tos_type == stos) {
              SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
            } else if (tos_type == ftos) {
              SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
            } else {
              SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
              MORE_STACK(1);
            }
          } else {
            if (tos_type == atos) {
              VERIFY_OOP(obj->obj_field(field_offset));
              SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
            } else if (tos_type == itos) {
              SET_STACK_INT(obj->int_field(field_offset), -1);
            } else if (tos_type == ltos) {
              SET_STACK_LONG(obj->long_field(field_offset), 0);
              MORE_STACK(1);
            } else if (tos_type == btos) {
              SET_STACK_INT(obj->byte_field(field_offset), -1);
            } else if (tos_type == ctos) {
              SET_STACK_INT(obj->char_field(field_offset), -1);
            } else if (tos_type == stos) {
              SET_STACK_INT(obj->short_field(field_offset), -1);
            } else if (tos_type == ftos) {
              SET_STACK_FLOAT(obj->float_field(field_offset), -1);
            } else {
              SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
              MORE_STACK(1);
            }
          }

          UPDATE_PC_AND_CONTINUE(3);
        }

      CASE(_putfield):
      CASE(_putstatic):
        {
          u2 index = Bytes::get_native_u2(pc+1);
          ConstantPoolCacheEntry* cache = cp->entry_at(index);
          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
            CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
                    handle_exception);
            cache = cp->entry_at(index);
          }

#ifdef VM_JVMTI
          if
(_jvmti_interp_events) { 2066 int *count_addr; 2067 oop obj; 2068 // Check to see if a field modification watch has been set 2069 // before we take the time to call into the VM. 2070 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2071 if ( *count_addr > 0 ) { 2072 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2073 obj = (oop)NULL; 2074 } 2075 else { 2076 if (cache->is_long() || cache->is_double()) { 2077 obj = (oop) STACK_OBJECT(-3); 2078 } else { 2079 obj = (oop) STACK_OBJECT(-2); 2080 } 2081 VERIFY_OOP(obj); 2082 } 2083 2084 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2085 obj, 2086 cache, 2087 (jvalue *)STACK_SLOT(-1)), 2088 handle_exception); 2089 } 2090 } 2091 #endif /* VM_JVMTI */ 2092 2093 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2094 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2095 2096 oop obj; 2097 int count; 2098 TosState tos_type = cache->flag_state(); 2099 2100 count = -1; 2101 if (tos_type == ltos || tos_type == dtos) { 2102 --count; 2103 } 2104 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2105 Klass* k = cache->f1_as_klass(); 2106 obj = k->java_mirror(); 2107 } else { 2108 --count; 2109 obj = (oop) STACK_OBJECT(count); 2110 CHECK_NULL(obj); 2111 } 2112 2113 // 2114 // Now store the result 2115 // 2116 int field_offset = cache->f2_as_index(); 2117 if (cache->is_volatile()) { 2118 if (tos_type == itos) { 2119 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2120 } else if (tos_type == atos) { 2121 VERIFY_OOP(STACK_OBJECT(-1)); 2122 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2123 } else if (tos_type == btos) { 2124 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2125 } else if (tos_type == ltos) { 2126 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2127 } else if (tos_type == ctos) { 2128 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2129 } else if (tos_type == stos) { 2130 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2131 } else if (tos_type == ftos) { 2132 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2133 } else { 2134 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2135 } 2136 OrderAccess::storeload(); 2137 } else { 2138 if (tos_type == itos) { 2139 obj->int_field_put(field_offset, STACK_INT(-1)); 2140 } else if (tos_type == atos) { 2141 VERIFY_OOP(STACK_OBJECT(-1)); 2142 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2143 } else if (tos_type == btos) { 2144 obj->byte_field_put(field_offset, STACK_INT(-1)); 2145 } else if (tos_type == ltos) { 2146 obj->long_field_put(field_offset, STACK_LONG(-1)); 2147 } else if (tos_type == ctos) { 2148 obj->char_field_put(field_offset, STACK_INT(-1)); 2149 } else if (tos_type == stos) { 2150 obj->short_field_put(field_offset, STACK_INT(-1)); 2151 } else if (tos_type == ftos) { 2152 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2153 } else { 2154 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2155 } 2156 } 2157 2158 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2159 } 2160 2161 CASE(_new): { 2162 u2 index = Bytes::get_Java_u2(pc+1); 2163 ConstantPool* constants = istate->method()->constants(); 2164 if (!constants->tag_at(index).is_unresolved_klass()) { 2165 // Make sure klass is initialized and doesn't have a finalizer 2166 Klass* entry = constants->slot_at(index).get_klass(); 2167 assert(entry->is_klass(), "Should be resolved klass"); 2168 Klass* k_entry = (Klass*) 
entry; 2169 assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); 2170 InstanceKlass* ik = (InstanceKlass*) k_entry; 2171 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2172 size_t obj_size = ik->size_helper(); 2173 oop result = NULL; 2174 // If the TLAB isn't pre-zeroed then we'll have to do it 2175 bool need_zero = !ZeroTLAB; 2176 if (UseTLAB) { 2177 result = (oop) THREAD->tlab().allocate(obj_size); 2178 } 2179 // Disable non-TLAB-based fast-path, because profiling requires that all 2180 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2181 // returns NULL. 2182 #ifndef CC_INTERP_PROFILE 2183 if (result == NULL) { 2184 need_zero = true; 2185 // Try allocate in shared eden 2186 retry: 2187 HeapWord* compare_to = *Universe::heap()->top_addr(); 2188 HeapWord* new_top = compare_to + obj_size; 2189 if (new_top <= *Universe::heap()->end_addr()) { 2190 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2191 goto retry; 2192 } 2193 result = (oop) compare_to; 2194 } 2195 } 2196 #endif 2197 if (result != NULL) { 2198 // Initialize object (if nonzero size and need) and then the header 2199 if (need_zero ) { 2200 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2201 obj_size -= sizeof(oopDesc) / oopSize; 2202 if (obj_size > 0 ) { 2203 memset(to_zero, 0, obj_size * HeapWordSize); 2204 } 2205 } 2206 if (UseBiasedLocking) { 2207 result->set_mark(ik->prototype_header()); 2208 } else { 2209 result->set_mark(markOopDesc::prototype()); 2210 } 2211 result->set_klass_gap(0); 2212 result->set_klass(k_entry); 2213 // Must prevent reordering of stores for object initialization 2214 // with stores that publish the new object. 2215 OrderAccess::storestore(); 2216 SET_STACK_OBJECT(result, 0); 2217 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2218 } 2219 } 2220 } 2221 // Slow case allocation 2222 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2223 handle_exception); 2224 // Must prevent reordering of stores for object initialization 2225 // with stores that publish the new object. 2226 OrderAccess::storestore(); 2227 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2228 THREAD->set_vm_result(NULL); 2229 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2230 } 2231 CASE(_anewarray): { 2232 u2 index = Bytes::get_Java_u2(pc+1); 2233 jint size = STACK_INT(-1); 2234 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2235 handle_exception); 2236 // Must prevent reordering of stores for object initialization 2237 // with stores that publish the new object. 2238 OrderAccess::storestore(); 2239 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2240 THREAD->set_vm_result(NULL); 2241 UPDATE_PC_AND_CONTINUE(3); 2242 } 2243 CASE(_multianewarray): { 2244 jint dims = *(pc+3); 2245 jint size = STACK_INT(-1); 2246 // stack grows down, dimensions are up! 2247 jint *dimarray = 2248 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2249 Interpreter::stackElementWords-1]; 2250 //adjust pointer to start of stack element 2251 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2252 handle_exception); 2253 // Must prevent reordering of stores for object initialization 2254 // with stores that publish the new object. 
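
        // Why this barrier matters (an illustrative sketch): the runtime
        // call above fully initializes the new array before handing it back
        // in vm_result, and the SET_STACK_OBJECT below publishes the
        // reference. The storestore keeps those stores ordered against any
        // racing reader, roughly:
        //
        //   writer: init header/body ... <storestore> ... store reference
        //   reader: load reference .................... read header/body
        //
        // so a thread that can see the reference also sees the
        // initializing stores.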
        OrderAccess::storestore();
        SET_STACK_OBJECT(THREAD->vm_result(), -dims);
        THREAD->set_vm_result(NULL);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
      }
      CASE(_checkcast):
          if (STACK_OBJECT(-1) != NULL) {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
            //
            // Check for compatibility. This check must not GC!!
            // It seems much more expensive now that we must dispatch.
            //
            if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
              // Decrement counter at checkcast.
              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
              ResourceMark rm(THREAD);
              const char* objName = objKlass->external_name();
              const char* klassName = klassOf->external_name();
              char* message = SharedRuntime::generate_class_cast_message(
                objName, klassName);
              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
            }
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
          } else {
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
          }
          UPDATE_PC_AND_CONTINUE(3);

      CASE(_instanceof):
          if (STACK_OBJECT(-1) == NULL) {
            SET_STACK_INT(0, -1);
            // Profile instanceof with null_seen and receiver.
            BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
          } else {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlass = STACK_OBJECT(-1)->klass();
            //
            // Check for compatibility. This check must not GC!!
            // It seems much more expensive now that we must dispatch.
            //
            if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
              SET_STACK_INT(1, -1);
            } else {
              SET_STACK_INT(0, -1);
              // Decrement counter at checkcast.
              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
            }
            // Profile instanceof with null_seen and receiver.
2320 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2321 } 2322 UPDATE_PC_AND_CONTINUE(3); 2323 2324 CASE(_ldc_w): 2325 CASE(_ldc): 2326 { 2327 u2 index; 2328 bool wide = false; 2329 int incr = 2; // frequent case 2330 if (opcode == Bytecodes::_ldc) { 2331 index = pc[1]; 2332 } else { 2333 index = Bytes::get_Java_u2(pc+1); 2334 incr = 3; 2335 wide = true; 2336 } 2337 2338 ConstantPool* constants = METHOD->constants(); 2339 switch (constants->tag_at(index).value()) { 2340 case JVM_CONSTANT_Integer: 2341 SET_STACK_INT(constants->int_at(index), 0); 2342 break; 2343 2344 case JVM_CONSTANT_Float: 2345 SET_STACK_FLOAT(constants->float_at(index), 0); 2346 break; 2347 2348 case JVM_CONSTANT_String: 2349 { 2350 oop result = constants->resolved_references()->obj_at(index); 2351 if (result == NULL) { 2352 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2353 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2354 THREAD->set_vm_result(NULL); 2355 } else { 2356 VERIFY_OOP(result); 2357 SET_STACK_OBJECT(result, 0); 2358 } 2359 break; 2360 } 2361 2362 case JVM_CONSTANT_Class: 2363 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2364 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2365 break; 2366 2367 case JVM_CONSTANT_UnresolvedClass: 2368 case JVM_CONSTANT_UnresolvedClassInError: 2369 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2370 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2371 THREAD->set_vm_result(NULL); 2372 break; 2373 2374 default: ShouldNotReachHere(); 2375 } 2376 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2377 } 2378 2379 CASE(_ldc2_w): 2380 { 2381 u2 index = Bytes::get_Java_u2(pc+1); 2382 2383 ConstantPool* constants = METHOD->constants(); 2384 switch (constants->tag_at(index).value()) { 2385 2386 case JVM_CONSTANT_Long: 2387 SET_STACK_LONG(constants->long_at(index), 1); 2388 break; 2389 2390 case JVM_CONSTANT_Double: 2391 SET_STACK_DOUBLE(constants->double_at(index), 1); 2392 break; 2393 default: ShouldNotReachHere(); 2394 } 2395 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2396 } 2397 2398 CASE(_fast_aldc_w): 2399 CASE(_fast_aldc): { 2400 u2 index; 2401 int incr; 2402 if (opcode == Bytecodes::_fast_aldc) { 2403 index = pc[1]; 2404 incr = 2; 2405 } else { 2406 index = Bytes::get_native_u2(pc+1); 2407 incr = 3; 2408 } 2409 2410 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2411 // This kind of CP cache entry does not need to match the flags byte, because 2412 // there is a 1-1 relation between bytecode type and CP entry type. 2413 ConstantPool* constants = METHOD->constants(); 2414 oop result = constants->resolved_references()->obj_at(index); 2415 if (result == NULL) { 2416 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2417 handle_exception); 2418 result = THREAD->vm_result(); 2419 } 2420 2421 VERIFY_OOP(result); 2422 SET_STACK_OBJECT(result, 0); 2423 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2424 } 2425 2426 CASE(_invokedynamic): { 2427 2428 if (!EnableInvokeDynamic) { 2429 // We should not encounter this bytecode if !EnableInvokeDynamic. 2430 // The verifier will stop it. However, if we get past the verifier, 2431 // this will stop the thread in a reasonable way, without crashing the JVM. 
2432 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), 2433 handle_exception); 2434 ShouldNotReachHere(); 2435 } 2436 2437 u4 index = Bytes::get_native_u4(pc+1); 2438 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2439 2440 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2441 // This kind of CP cache entry does not need to match the flags byte, because 2442 // there is a 1-1 relation between bytecode type and CP entry type. 2443 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2444 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), 2445 handle_exception); 2446 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2447 } 2448 2449 Method* method = cache->f1_as_method(); 2450 if (VerifyOops) method->verify(); 2451 2452 if (cache->has_appendix()) { 2453 ConstantPool* constants = METHOD->constants(); 2454 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2455 MORE_STACK(1); 2456 } 2457 2458 istate->set_msg(call_method); 2459 istate->set_callee(method); 2460 istate->set_callee_entry_point(method->from_interpreted_entry()); 2461 istate->set_bcp_advance(5); 2462 2463 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2464 BI_PROFILE_UPDATE_CALL(); 2465 2466 UPDATE_PC_AND_RETURN(0); // I'll be back... 2467 } 2468 2469 CASE(_invokehandle): { 2470 2471 if (!EnableInvokeDynamic) { 2472 ShouldNotReachHere(); 2473 } 2474 2475 u2 index = Bytes::get_native_u2(pc+1); 2476 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2477 2478 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2479 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), 2480 handle_exception); 2481 cache = cp->entry_at(index); 2482 } 2483 2484 Method* method = cache->f1_as_method(); 2485 if (VerifyOops) method->verify(); 2486 2487 if (cache->has_appendix()) { 2488 ConstantPool* constants = METHOD->constants(); 2489 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2490 MORE_STACK(1); 2491 } 2492 2493 istate->set_msg(call_method); 2494 istate->set_callee(method); 2495 istate->set_callee_entry_point(method->from_interpreted_entry()); 2496 istate->set_bcp_advance(3); 2497 2498 // Invokehandle has got a call counter, just like a final call -> increment! 2499 BI_PROFILE_UPDATE_FINALCALL(); 2500 2501 UPDATE_PC_AND_RETURN(0); // I'll be back... 2502 } 2503 2504 CASE(_invokeinterface): { 2505 u2 index = Bytes::get_native_u2(pc+1); 2506 2507 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2508 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2509 2510 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2511 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2512 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2513 handle_exception); 2514 cache = cp->entry_at(index); 2515 } 2516 2517 istate->set_msg(call_method); 2518 2519 // Special case of invokeinterface called for virtual method of 2520 // java.lang.Object. See cpCacheOop.cpp for details. 2521 // This code isn't produced by javac, but could be produced by 2522 // another compliant java compiler. 2523 if (cache->is_forced_virtual()) { 2524 Method* callee; 2525 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2526 if (cache->is_vfinal()) { 2527 callee = cache->f2_as_vfinal_method(); 2528 // Profile 'special case of invokeinterface' final call. 
2529 BI_PROFILE_UPDATE_FINALCALL(); 2530 } else { 2531 // Get receiver. 2532 int parms = cache->parameter_size(); 2533 // Same comments as invokevirtual apply here. 2534 oop rcvr = STACK_OBJECT(-parms); 2535 VERIFY_OOP(rcvr); 2536 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2537 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2538 // Profile 'special case of invokeinterface' virtual call. 2539 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2540 } 2541 istate->set_callee(callee); 2542 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2543 #ifdef VM_JVMTI 2544 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2545 istate->set_callee_entry_point(callee->interpreter_entry()); 2546 } 2547 #endif /* VM_JVMTI */ 2548 istate->set_bcp_advance(5); 2549 UPDATE_PC_AND_RETURN(0); // I'll be back... 2550 } 2551 2552 // this could definitely be cleaned up QQQ 2553 Method* callee; 2554 Klass* iclass = cache->f1_as_klass(); 2555 // InstanceKlass* interface = (InstanceKlass*) iclass; 2556 // get receiver 2557 int parms = cache->parameter_size(); 2558 oop rcvr = STACK_OBJECT(-parms); 2559 CHECK_NULL(rcvr); 2560 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2561 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2562 int i; 2563 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2564 if (ki->interface_klass() == iclass) break; 2565 } 2566 // If the interface isn't found, this class doesn't implement this 2567 // interface. The link resolver checks this but only for the first 2568 // time this interface is called. 2569 if (i == int2->itable_length()) { 2570 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2571 } 2572 int mindex = cache->f2_as_index(); 2573 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2574 callee = im[mindex].method(); 2575 if (callee == NULL) { 2576 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2577 } 2578 2579 // Profile virtual call. 2580 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2581 2582 istate->set_callee(callee); 2583 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2584 #ifdef VM_JVMTI 2585 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2586 istate->set_callee_entry_point(callee->interpreter_entry()); 2587 } 2588 #endif /* VM_JVMTI */ 2589 istate->set_bcp_advance(5); 2590 UPDATE_PC_AND_RETURN(0); // I'll be back... 2591 } 2592 2593 CASE(_invokevirtual): 2594 CASE(_invokespecial): 2595 CASE(_invokestatic): { 2596 u2 index = Bytes::get_native_u2(pc+1); 2597 2598 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2599 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2600 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2601 2602 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2603 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2604 handle_exception); 2605 cache = cp->entry_at(index); 2606 } 2607 2608 istate->set_msg(call_method); 2609 { 2610 Method* callee; 2611 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2612 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2613 if (cache->is_vfinal()) { 2614 callee = cache->f2_as_vfinal_method(); 2615 // Profile final call. 
            BI_PROFILE_UPDATE_FINALCALL();
          } else {
            // get receiver
            int parms = cache->parameter_size();
            // this works but needs a resourcemark and seems to create a vtable on every call:
            // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
            //
            // this fails with an assert
            // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
            // but this works
            oop rcvr = STACK_OBJECT(-parms);
            VERIFY_OOP(rcvr);
            InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
            /*
              Executing this code in java.lang.String:
                  public String(char value[]) {
                        this.count = value.length;
                        this.value = (char[])value.clone();
                  }

              a find on rcvr->klass() reports:
              {type array char}{type array class}
               - klass: {other class}

              but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
              because rcvr->klass()->oop_is_instance() == 0
              However it seems to have a vtable in the right location. Huh?
            */
            callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
            // Profile virtual call.
            BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
          }
        } else {
          if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          }
          callee = cache->f1_as_method();

          // Profile call.
          BI_PROFILE_UPDATE_CALL();
        }

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
#endif /* VM_JVMTI */
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
    }

    /* Allocate memory for a new java object. */

    CASE(_newarray): {
        BasicType atype = (BasicType) *(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                handle_exception);
        // Must prevent reordering of stores for object initialization
        // with stores that publish the new object.
        OrderAccess::storestore();
        SET_STACK_OBJECT(THREAD->vm_result(), -1);
        THREAD->set_vm_result(NULL);

        UPDATE_PC_AND_CONTINUE(2);
    }

    /* Throw an exception. */

    CASE(_athrow): {
        oop except_oop = STACK_OBJECT(-1);
        CHECK_NULL(except_oop);
        // set pending_exception so we use common code
        THREAD->set_pending_exception(except_oop, NULL, 0);
        goto handle_exception;
    }

    /* goto and jsr. They are exactly the same except jsr pushes
     * the address of the next instruction first.
     */

    CASE(_jsr): {
        /* push bytecode index on stack */
        SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
        MORE_STACK(1);
        /* FALL THROUGH */
    }

    CASE(_goto):
    {
        int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        // Profile jump.
2712 BI_PROFILE_UPDATE_JUMP(); 2713 address branch_pc = pc; 2714 UPDATE_PC(offset); 2715 DO_BACKEDGE_CHECKS(offset, branch_pc); 2716 CONTINUE; 2717 } 2718 2719 CASE(_jsr_w): { 2720 /* push return address on the stack */ 2721 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2722 MORE_STACK(1); 2723 /* FALL THROUGH */ 2724 } 2725 2726 CASE(_goto_w): 2727 { 2728 int32_t offset = Bytes::get_Java_u4(pc + 1); 2729 // Profile jump. 2730 BI_PROFILE_UPDATE_JUMP(); 2731 address branch_pc = pc; 2732 UPDATE_PC(offset); 2733 DO_BACKEDGE_CHECKS(offset, branch_pc); 2734 CONTINUE; 2735 } 2736 2737 /* return from a jsr or jsr_w */ 2738 2739 CASE(_ret): { 2740 // Profile ret. 2741 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2742 // Now, update the pc. 2743 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2744 UPDATE_PC_AND_CONTINUE(0); 2745 } 2746 2747 /* debugger breakpoint */ 2748 2749 CASE(_breakpoint): { 2750 Bytecodes::Code original_bytecode; 2751 DECACHE_STATE(); 2752 SET_LAST_JAVA_FRAME(); 2753 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2754 METHOD, pc); 2755 RESET_LAST_JAVA_FRAME(); 2756 CACHE_STATE(); 2757 if (THREAD->has_pending_exception()) goto handle_exception; 2758 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2759 handle_exception); 2760 2761 opcode = (jubyte)original_bytecode; 2762 goto opcode_switch; 2763 } 2764 2765 DEFAULT: 2766 fatal(err_msg("Unimplemented opcode %d = %s", opcode, 2767 Bytecodes::name((Bytecodes::Code)opcode))); 2768 goto finish; 2769 2770 } /* switch(opc) */ 2771 2772 2773 #ifdef USELABELS 2774 check_for_exception: 2775 #endif 2776 { 2777 if (!THREAD->has_pending_exception()) { 2778 CONTINUE; 2779 } 2780 /* We will be gcsafe soon, so flush our state. */ 2781 DECACHE_PC(); 2782 goto handle_exception; 2783 } 2784 do_continue: ; 2785 2786 } /* while (1) interpreter loop */ 2787 2788 2789 // An exception exists in the thread state see whether this activation can handle it 2790 handle_exception: { 2791 2792 HandleMarkCleaner __hmc(THREAD); 2793 Handle except_oop(THREAD, THREAD->pending_exception()); 2794 // Prevent any subsequent HandleMarkCleaner in the VM 2795 // from freeing the except_oop handle. 2796 HandleMark __hm(THREAD); 2797 2798 THREAD->clear_pending_exception(); 2799 assert(except_oop(), "No exception to process"); 2800 intptr_t continuation_bci; 2801 // expression stack is emptied 2802 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2803 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2804 handle_exception); 2805 2806 except_oop = THREAD->vm_result(); 2807 THREAD->set_vm_result(NULL); 2808 if (continuation_bci >= 0) { 2809 // Place exception on top of stack 2810 SET_STACK_OBJECT(except_oop(), 0); 2811 MORE_STACK(1); 2812 pc = METHOD->code_base() + continuation_bci; 2813 if (TraceExceptions) { 2814 ttyLocker ttyl; 2815 ResourceMark rm; 2816 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); 2817 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2818 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2819 (int)(istate->bcp() - METHOD->code_base()), 2820 (int)continuation_bci, p2i(THREAD)); 2821 } 2822 // for AbortVMOnException flag 2823 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2824 2825 // Update profiling data. 
      BI_PROFILE_ALIGN_TO_CURRENT_BCI();
      goto run;
    }
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
      tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
                    (int)(istate->bcp() - METHOD->code_base()),
                    p2i(THREAD));
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
    // No handler in this activation, unwind and try again
    THREAD->set_pending_exception(except_oop(), NULL, 0);
    goto handle_return;
  } // handle_exception:

  // Return from an interpreter invocation with the result of the interpretation
  // on the top of the Java Stack (or a pending exception)

  handle_Pop_Frame: {

    // We don't really do anything special here except we must be aware
    // that we can get here without ever locking the method (if sync).
    // Also we skip the notification of the exit.

    istate->set_msg(popping_frame);
    // Clear the pending flag while the pop is in process, so we don't
    // start another one if a call_vm is done.
    THREAD->clr_pop_frame_pending();
    // Let the interpreter (only) see that we're in the process of popping a frame
    THREAD->set_pop_frame_in_process();

    goto handle_return;

  } // handle_Pop_Frame

  // ForceEarlyReturn ends a method, and returns to the caller with a return value
  // given by the invoker of the early return.
  handle_Early_Return: {

    istate->set_msg(early_return);

    // Clear expression stack.
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;

    JvmtiThreadState *ts = THREAD->jvmti_thread_state();

    // Push the value to be returned.
    switch (istate->method()->result_type()) {
      case T_BOOLEAN:
      case T_SHORT:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
        SET_STACK_INT(ts->earlyret_value().i, 0);
        MORE_STACK(1);
        break;
      case T_LONG:
        SET_STACK_LONG(ts->earlyret_value().j, 1);
        MORE_STACK(2);
        break;
      case T_FLOAT:
        SET_STACK_FLOAT(ts->earlyret_value().f, 0);
        MORE_STACK(1);
        break;
      case T_DOUBLE:
        SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
        MORE_STACK(2);
        break;
      case T_ARRAY:
      case T_OBJECT:
        SET_STACK_OBJECT(ts->earlyret_oop(), 0);
        MORE_STACK(1);
        break;
    }

    ts->clr_earlyret_value();
    ts->set_earlyret_oop(NULL);
    ts->clr_earlyret_pending();

    // Fall through to handle_return.

  } // handle_Early_Return

  handle_return: {
    // A storestore barrier is required to order initialization of
    // final fields with publishing the reference to the object that
    // holds the field. Without the barrier the value of final fields
    // can be observed to change.
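    //
    // For illustration (a simplified sketch; class C is hypothetical), the
    // failure mode described above corresponds to Java code like
    //
    //   class C { final int x; C() { x = 42; } }
    //   shared = new C();        // writer: initialize x, then publish 'shared'
    //   int r = shared.x;        // reader, running on another thread
    //
    // Without the barrier below, the store of x could drift past the store
    // that publishes 'shared', so a reader could first observe r == 0 and
    // later r == 42, i.e. a final field appearing to change.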
    OrderAccess::storestore();

    DECACHE_STATE();

    bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
    bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
    Handle original_exception(THREAD, THREAD->pending_exception());
    Handle illegal_state_oop(THREAD, NULL);

    // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
    // in any following VM entries from freeing our live handles, but illegal_state_oop
    // isn't really allocated yet and so doesn't become live until later and
    // in unpredictable places. Instead we must protect the places where we enter the
    // VM. It would be much simpler (and safer) if we could allocate a real handle with
    // a NULL oop in it and then overwrite the oop later as needed. This,
    // unfortunately, isn't possible.

    THREAD->clear_pending_exception();

    //
    // As far as we are concerned we have returned. If we have a pending exception,
    // that will be returned as this invocation's result. However, if we get any
    // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions
    // will be our final result (i.e. a monitor exception trumps a pending exception).
    //

    // If we never locked the method (or really passed the point where we would have),
    // there is no need to unlock it (or look for other monitors), since that
    // could not have happened.

    if (THREAD->do_not_unlock()) {

      // Never locked, reset the flag now because obviously any caller must
      // have passed their point of locking for us to have gotten here.

      THREAD->clr_do_not_unlock();
    } else {
      // At this point we consider that we have returned. We now check that the
      // locks were properly block structured. If we find that they were not
      // used properly we will return with an illegal monitor exception.
      // The exception is checked by the caller not the callee since this
      // checking is considered to be part of the invocation and therefore
      // in the caller's scope (JVM spec 8.13).
      //
      // Another weird thing to watch for is if the method was locked
      // recursively and then not exited properly. This means we must
      // examine all the entries in reverse time (and stack) order and
      // unlock as we find them. If we find the method monitor before
      // we are at the initial entry then we should throw an exception.
      // It is not clear that the template-based interpreter does this
      // correctly.

      BasicObjectLock* base = istate->monitor_base();
      BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
      bool method_unlock_needed = METHOD->is_synchronized();
      // We know the initial monitor was used for the method, so don't check
      // that slot in the loop.
      if (method_unlock_needed) base--;

      // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
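      //
      // Layout sketch for the sweep below (illustrative): the monitor slots
      // live between stack_base() ('end', the most recently allocated slot)
      // and monitor_base() ('base'), so advancing 'end' toward 'base' visits
      // locks in reverse acquisition order -- the order in which properly
      // block-structured code would have released them. For a synchronized
      // method, the base-- above parked 'base' on the method's own monitor
      // slot so the loop skips it; that slot is dealt with separately after
      // the sweep.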
      while (end < base) {
        oop lockee = end->obj();
        if (lockee != NULL) {
          BasicLock* lock = end->lock();
          markOop header = lock->displaced_header();
          end->set_obj(NULL);

          if (!lockee->mark()->has_bias_pattern()) {
            // If it isn't recursive, we must either swap the old header or call the runtime
            if (header != NULL) {
              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                // restore object for the slow case
                end->set_obj(lockee);
                {
                  // Prevent any HandleMarkCleaner from freeing our live handles
                  HandleMark __hm(THREAD);
                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
                }
              }
            }
          }
          // One error is plenty
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        }
        end++;
      }
      // Unlock the method if needed
      if (method_unlock_needed) {
        if (base->obj() == NULL) {
          // The method is already unlocked; this is not good.
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        } else {
          //
          // The initial monitor is always used for the method.
          // However, if that slot no longer holds the oop for the method,
          // it was unlocked and reused by something that wasn't unlocked!
          //
          // deopt can come in with rcvr dead because c2 knows
          // its value is preserved in the monitor. So we can't use locals[0] at all
          // and must use the first monitor slot.
          //
          oop rcvr = base->obj();
          if (rcvr == NULL) {
            if (!suppress_error) {
              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
              illegal_state_oop = THREAD->pending_exception();
              THREAD->clear_pending_exception();
            }
          } else if (UseHeavyMonitors) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles.
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
            }
            if (THREAD->has_pending_exception()) {
              if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
              THREAD->clear_pending_exception();
            }
          } else {
            BasicLock* lock = base->lock();
            markOop header = lock->displaced_header();
            base->set_obj(NULL);

            if (!rcvr->mark()->has_bias_pattern()) {
              base->set_obj(NULL);
              // If it isn't recursive, we must either swap the old header or call the runtime
              if (header != NULL) {
                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  base->set_obj(rcvr);
                  {
                    // Prevent any HandleMarkCleaner from freeing our live handles
                    HandleMark __hm(THREAD);
                    CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
                  }
                  if (THREAD->has_pending_exception()) {
                    if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
                    THREAD->clear_pending_exception();
                  }
                }
              }
            }
          }
        }
      }
    }
    // Clear the do_not_unlock flag now.
    THREAD->clr_do_not_unlock();

    //
    // Notify jvmti/jvmdi
    //
    // NOTE: we do not notify a method_exit if we have a pending exception,
    // including an exception we generate for unlocking checks. In the former
    // case, JVMDI has already been notified by our call for the exception handler
    // and in both cases as far as JVMDI is concerned we have already returned.
    // If we notify it again JVMDI will be all confused about how many frames
    // are still on the stack (4340444).
    //
    // NOTE Further! It turns out that the JVMTI spec in fact expects to see
    // method_exit events whenever we leave an activation unless it was done
    // for popframe. This is nothing like jvmdi. However we are passing the
    // tests at the moment (apparently because they are jvmdi based) so rather
    // than change this code and possibly fail tests we will leave it alone
    // (with this note) in anticipation of changing the vm and the tests
    // simultaneously.
    //

    suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;

#ifdef VM_JVMTI
    if (_jvmti_interp_events) {
      // Whenever JVMTI puts a thread in interp_only_mode, method
      // entry/exit events are sent for that thread to track stack depth.
      if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
        {
          // Prevent any HandleMarkCleaner from freeing our live handles
          HandleMark __hm(THREAD);
          CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
        }
      }
    }
#endif /* VM_JVMTI */

    //
    // See if we are returning any exception.
    // A pending exception that was pending prior to a possible popping frame
    // overrides the popping frame.
    //
    assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
    if (illegal_state_oop() != NULL || original_exception() != NULL) {
      // Inform the frame manager we have no result.
      istate->set_msg(throwing_exception);
      if (illegal_state_oop() != NULL)
        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
      else
        THREAD->set_pending_exception(original_exception(), NULL, 0);
      UPDATE_PC_AND_RETURN(0);
    }

    if (istate->msg() == popping_frame) {
      // Make it simpler on the assembly code and set the message for the frame pop.
      if (istate->prev() == NULL) {
        // We must be returning to a deoptimized frame (because popframe only happens between
        // two interpreted frames). We need to save the current arguments in C heap so that
        // the deoptimized frame when it restarts can copy the arguments to its expression
        // stack and re-execute the call. We also have to notify deoptimization that this
        // has occurred and to pick up the preserved args and copy them to the deoptimized
        // frame's java expression stack. Yuck.
        //
        THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
                                LOCALS_SLOT(METHOD->size_of_parameters() - 1));
        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
      }
    } else {
      istate->set_msg(return_from_method);
    }

    // Normal return
    // Advance the pc and return to frame manager
    UPDATE_PC_AND_RETURN(1);
  } /* handle_return: */

// This is really a fatal error return

finish:
  DECACHE_TOS();
  DECACHE_PC();

  return;
}

/*
 * All the code following this point is only produced once and is not present
 * in the JVMTI version of the interpreter
 */

#ifndef VM_JVMTI

// This constructor should only be used to construct the object to signal
// interpreter initialization. All other instances should be created by
// the frame manager.
BytecodeInterpreter::BytecodeInterpreter(messages msg) {
  if (msg != initialize) ShouldNotReachHere();
  _msg = msg;
  _self_link = this;
  _prev_link = NULL;
}

// Inline static functions for Java Stack and Local manipulation

// The implementations are platform dependent. We have to worry about
// alignment on some machines, and the answer can change on the same
// platform depending on whether it is also an LP64 machine.
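
// A sketch of the offset convention used by the accessors below
// (illustrative): 'tos' points at the next free expression-stack slot, so
// callers name live values with negative offsets (-1 is the value on top,
// -2 the one beneath it) and use offset 0 when writing the slot about to be
// pushed. Interpreter::expr_index_at() maps that logical slot number onto
// the platform-dependent word index, which is why every accessor indexes
// tos[Interpreter::expr_index_at(-offset)]. For example:
//
//   jint v = stack_int(tos, -1);       // read the jint on top of the stack
//   set_stack_int(tos, v + 1, -1);     // overwrite the same slot in place
//
// reads and rewrites the same physical slot regardless of word size.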
3194 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3195 return (address) tos[Interpreter::expr_index_at(-offset)]; 3196 } 3197 3198 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3199 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3200 } 3201 3202 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3203 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3204 } 3205 3206 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3207 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3208 } 3209 3210 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3211 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3212 } 3213 3214 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3215 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3216 } 3217 3218 // only used for value types 3219 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3220 int offset) { 3221 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3222 } 3223 3224 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3225 int offset) { 3226 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3227 } 3228 3229 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3230 int offset) { 3231 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3232 } 3233 3234 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3235 int offset) { 3236 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3237 } 3238 3239 // needs to be platform dep for the 32 bit platforms. 3240 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3241 int offset) { 3242 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3243 } 3244 3245 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3246 address addr, int offset) { 3247 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3248 ((VMJavaVal64*)addr)->d); 3249 } 3250 3251 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3252 int offset) { 3253 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3254 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3255 } 3256 3257 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3258 address addr, int offset) { 3259 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3260 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3261 ((VMJavaVal64*)addr)->l; 3262 } 3263 3264 // Locals 3265 3266 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3267 return (address)locals[Interpreter::local_index_at(-offset)]; 3268 } 3269 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3270 return (jint)locals[Interpreter::local_index_at(-offset)]; 3271 } 3272 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3273 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3274 } 3275 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3276 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3277 } 3278 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3279 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3280 } 3281 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3282 return 
((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l; 3283 } 3284 3285 // Returns the address of locals value. 3286 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) { 3287 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3288 } 3289 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) { 3290 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3291 } 3292 3293 // Used for local value or returnAddress 3294 void BytecodeInterpreter::set_locals_slot(intptr_t *locals, 3295 address value, int offset) { 3296 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value; 3297 } 3298 void BytecodeInterpreter::set_locals_int(intptr_t *locals, 3299 jint value, int offset) { 3300 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value; 3301 } 3302 void BytecodeInterpreter::set_locals_float(intptr_t *locals, 3303 jfloat value, int offset) { 3304 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value; 3305 } 3306 void BytecodeInterpreter::set_locals_object(intptr_t *locals, 3307 oop value, int offset) { 3308 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value; 3309 } 3310 void BytecodeInterpreter::set_locals_double(intptr_t *locals, 3311 jdouble value, int offset) { 3312 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value; 3313 } 3314 void BytecodeInterpreter::set_locals_long(intptr_t *locals, 3315 jlong value, int offset) { 3316 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value; 3317 } 3318 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals, 3319 address addr, int offset) { 3320 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d; 3321 } 3322 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals, 3323 address addr, int offset) { 3324 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l; 3325 } 3326 3327 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset, 3328 intptr_t* locals, int locals_offset) { 3329 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)]; 3330 locals[Interpreter::local_index_at(-locals_offset)] = value; 3331 } 3332 3333 3334 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset, 3335 int to_offset) { 3336 tos[Interpreter::expr_index_at(-to_offset)] = 3337 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)]; 3338 } 3339 3340 void BytecodeInterpreter::dup(intptr_t *tos) { 3341 copy_stack_slot(tos, -1, 0); 3342 } 3343 void BytecodeInterpreter::dup2(intptr_t *tos) { 3344 copy_stack_slot(tos, -2, 0); 3345 copy_stack_slot(tos, -1, 1); 3346 } 3347 3348 void BytecodeInterpreter::dup_x1(intptr_t *tos) { 3349 /* insert top word two down */ 3350 copy_stack_slot(tos, -1, 0); 3351 copy_stack_slot(tos, -2, -1); 3352 copy_stack_slot(tos, 0, -2); 3353 } 3354 3355 void BytecodeInterpreter::dup_x2(intptr_t *tos) { 3356 /* insert top word three down */ 3357 copy_stack_slot(tos, -1, 0); 3358 copy_stack_slot(tos, -2, -1); 3359 copy_stack_slot(tos, -3, -2); 3360 copy_stack_slot(tos, 0, -3); 3361 } 3362 void BytecodeInterpreter::dup2_x1(intptr_t *tos) { 3363 /* insert top 2 slots three down */ 3364 copy_stack_slot(tos, -1, 1); 3365 copy_stack_slot(tos, -2, 0); 3366 copy_stack_slot(tos, -3, -1); 3367 copy_stack_slot(tos, 1, -2); 3368 copy_stack_slot(tos, 0, -3); 3369 } 3370 void BytecodeInterpreter::dup2_x2(intptr_t *tos) { 3371 /* insert top 2 slots four 
void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}

void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}

void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap the top two stack elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy the -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store the saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
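/*
 * A standalone sketch (assumptions, not HotSpot code) of the slot shuffles
 * above: with one word per slot, expr_index_at(-offset) == -offset, the
 * stack grows toward lower addresses, and offset -1 names the top element.
 * dup_x1 turns [..., value2, value1] into [..., value1, value2, value1].
 * Wrapped in #if 0 so it is never compiled as part of this file.
 */
#if 0
#include <cstdint>
#include <cstdio>

static void copy_stack_slot(intptr_t* tos, int from, int to) {
  tos[-to] = tos[-from];         // expr_index_at(-offset) == -offset, assumed
}

static void dup_x1(intptr_t* tos) {
  copy_stack_slot(tos, -1, 0);   // free slot <- value1
  copy_stack_slot(tos, -2, -1);  // old top   <- value2
  copy_stack_slot(tos, 0, -2);   // old value2 slot <- value1
}

int main() {
  intptr_t slots[8] = {0};
  intptr_t* tos = &slots[3];     // slots[4] = top, slots[5] = below top
  tos[2] = 'a';                  // value2 (deeper)
  tos[1] = 'b';                  // value1 (top)

  dup_x1(tos);
  tos -= 1;                      // the stack is now one slot deeper
  printf("%c %c %c\n",           // prints "b a b", bottom to top
         (char)tos[3], (char)tos[2], (char)tos[1]);
  return 0;
}
#endif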
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}

void BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Callable from a native debugger (e.g. "call PI(istate)" in gdb) to dump
// the interpreter state above without dealing with C++ name mangling.
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // JVMTI
#endif // CC_INTERP