1 /* 2 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/vmSymbols.hpp" 27 #include "gc_interface/collectedHeap.hpp" 28 #include "interpreter/bytecodeHistogram.hpp" 29 #include "interpreter/bytecodeInterpreter.hpp" 30 #include "interpreter/bytecodeInterpreter.inline.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "interpreter/interpreterRuntime.hpp" 33 #include "memory/cardTableModRefBS.hpp" 34 #include "memory/resourceArea.hpp" 35 #include "oops/objArrayKlass.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/jvmtiExport.hpp" 38 #include "runtime/frame.inline.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/interfaceSupport.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/threadCritical.hpp" 43 #include "utilities/exceptions.hpp" 44 #ifdef TARGET_OS_ARCH_linux_x86 45 # include "orderAccess_linux_x86.inline.hpp" 46 #endif 47 #ifdef TARGET_OS_ARCH_linux_sparc 48 # include "orderAccess_linux_sparc.inline.hpp" 49 #endif 50 #ifdef TARGET_OS_ARCH_linux_zero 51 # include "orderAccess_linux_zero.inline.hpp" 52 #endif 53 #ifdef TARGET_OS_ARCH_solaris_x86 54 # include "orderAccess_solaris_x86.inline.hpp" 55 #endif 56 #ifdef TARGET_OS_ARCH_solaris_sparc 57 # include "orderAccess_solaris_sparc.inline.hpp" 58 #endif 59 #ifdef TARGET_OS_ARCH_windows_x86 60 # include "orderAccess_windows_x86.inline.hpp" 61 #endif 62 #ifdef TARGET_OS_ARCH_linux_arm 63 # include "orderAccess_linux_arm.inline.hpp" 64 #endif 65 #ifdef TARGET_OS_ARCH_linux_ppc 66 # include "orderAccess_linux_ppc.inline.hpp" 67 #endif 68 #ifdef TARGET_OS_ARCH_bsd_x86 69 # include "orderAccess_bsd_x86.inline.hpp" 70 #endif 71 #ifdef TARGET_OS_ARCH_bsd_zero 72 # include "orderAccess_bsd_zero.inline.hpp" 73 #endif 74 75 76 // no precompiled headers 77 #ifdef CC_INTERP 78 79 /* 80 * USELABELS - If using GCC, then use labels for the opcode dispatching 81 * rather -then a switch statement. 
This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

// CASE/DEFAULT - computed-goto labels when USELABELS is in effect,
// ordinary switch labels otherwise.
#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
   Interpreter safepoint: it is expected that the interpreter will have no live
   handles of its own creation live at an interpreter safepoint. Therefore we
   run a HandleMarkCleaner and trash all handles allocated in the call chain
   since the JavaCalls::call_helper invocation that initiated the chain.
   There really shouldn't be any handles remaining to trash but this is cheap
   in relation to a safepoint.
*/
// SAFEPOINT - cooperate with a pending safepoint: scrub handles allocated on
// this chain (see comment above), then block in the VM until the safepoint ends.
#define SAFEPOINT                                                         \
    if ( SafepointSynchronize::is_synchronizing()) {                      \
        {                                                                 \
          /* zap freed handles rather than GC'ing them */                 \
          HandleMarkCleaner __hmc(THREAD);                                \
        }                                                                 \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);   \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                  \
    DECACHE_STATE();                                                      \
    SET_LAST_JAVA_FRAME();                                                \
    {                                                                     \
       ThreadInVMfromJava trans(THREAD);                                  \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);     \
    }                                                                     \
    RESET_LAST_JAVA_FRAME();                                              \
    CACHE_STATE();

// Normal throw of a java error
#define VM_JAVA_ERROR(name, msg)                                          \
    VM_JAVA_ERROR_NO_JUMP(name, msg)                                      \
    goto handle_exception;

// Per-bytecode bookkeeping: global counter, histogram, StopInterpreterAt
// breakpointing and TraceBytecodes support. Compiled away in PRODUCT builds.
#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                   topOfStack[Interpreter::expr_index_at(1)],                        \
                                   topOfStack[Interpreter::expr_index_at(2)]),                       \
                                   handle_exception);                                                \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result if we call at_single_stepping_point() we refetch opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
{                                                                                \
    if (_jvmti_interp_events) {                                                  \
      if (JvmtiExport::should_post_single_step()) {                              \
        DECACHE_STATE();                                                         \
        SET_LAST_JAVA_FRAME();                                                   \
        ThreadInVMfromJava trans(THREAD);                                        \
        JvmtiExport::at_single_stepping_point(THREAD,                            \
                                              istate->method(),                  \
                                              pc);                               \
        RESET_LAST_JAVA_FRAME();                                                 \
        CACHE_STATE();                                                           \
        if (THREAD->pop_frame_pending() &&                                       \
            !THREAD->pop_frame_in_process()) {                                   \
          goto handle_Pop_Frame;                                                 \
        }                                                                        \
        opcode = *pc;                                                            \
      }                                                                          \
    }                                                                            \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


// UPDATE_PC - advance the bytecode pointer without touching the stack.
#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
*/
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
#define INVOCATION_COUNT METHOD->invocation_counter()
#define BACKEDGE_COUNT METHOD->backedge_counter()


#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment()
// Ask the runtime whether an OSR nmethod is (or should be made) available
// for the loop containing branch_pc; result is stored into 'res'.
#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
    if ((skip) <= 0) {                                                                              \
      if (UseLoopCounter) {                                                                         \
        bool do_OSR = UseOnStackReplacement;                                                        \
        BACKEDGE_COUNT->increment();                                                                \
        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
        if (do_OSR) {                                                                               \
          nmethod* osr_nmethod;                                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
            intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD);                             \
            istate->set_msg(do_osr);                                                                \
            istate->set_osr_buf((address)buf);                                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
            return;                                                                                 \
          }                                                                                         \
        }                                                                                           \
      }  /* UseCompiler ... */                                                                      \
      INCR_INVOCATION_COUNT;                                                                        \
      SAFEPOINT;                                                                                    \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
// Flush / reload the cached expression-stack pointer.
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
// Flush / reload the cached bytecode pointer, constant pool cache and locals.
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                 \
    if ((obj_) == NULL) {                                                \
        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
    }                                                                    \
    VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM; don't check for pending exceptions
#define CALL_VM_NOCHECK(func)                                     \
          DECACHE_STATE();                                        \
          SET_LAST_JAVA_FRAME();                                  \
          func;                                                   \
          RESET_LAST_JAVA_FRAME();                                \
          CACHE_STATE();                                          \
          if (THREAD->pop_frame_pending() &&                      \
              !THREAD->pop_frame_in_process()) {                  \
            goto handle_Pop_Frame;                                \
          }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                    \
          CALL_VM_NOCHECK(func);                                  \
          if (THREAD->has_pending_exception()) goto label;        \
        }


#ifdef VM_JVMTI
// Post a JVMTI field-access event if any field access watch is armed.
#define MAYBE_POST_FIELD_ACCESS() {                                 \
  if (_jvmti_interp_events) {                                       \
    int *count_addr;                                                \
    oop obj;                                                        \
    /* Check to see if a field access watch has been set */         \
    /* before we take the time to call into the VM.       */        \
    count_addr = (int *)JvmtiExport::get_field_access_count_addr(); \
    if ( *count_addr > 0 ) {                                        \
      if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {       \
        obj = (oop)NULL;                                            \
      } else {                                                      \
        obj = (oop) STACK_OBJECT(-1);                               \
        VERIFY_OOP(obj);                                            \
      }                                                             \
      CALL_VM(InterpreterRuntime::post_field_access(THREAD, obj, cache), handle_exception); \
    }                                                               \
  }                                                                 \
}
// Post a JVMTI field-modification event if any field modification watch is armed.
#define MAYBE_POST_FIELD_MODIFICATION() {                                 \
  if (_jvmti_interp_events) {                                             \
    int *count_addr;                                                      \
    oop obj;                                                              \
    /* Check to see if a field modification watch has been set */         \
    /* before we take the time to call into the VM.             */        \
    count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); \
    if ( *count_addr > 0 ) {                                              \
      if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {             \
        obj = (oop)NULL;                                                  \
      }                                                                   \
      else {                                                              \
        if (cache->is_long() || cache->is_double()) {                     \
          obj = (oop) STACK_OBJECT(-3);                                   \
        } else {                                                          \
          obj = (oop) STACK_OBJECT(-2);                                   \
        }                                                                 \
        VERIFY_OOP(obj);                                                  \
      }                                                                   \
      CALL_VM(InterpreterRuntime::post_field_modification(THREAD,         \
                                  obj,                                    \
                                  cache,                                  \
                                  (jvalue *)STACK_SLOT(-1)),              \
                                  handle_exception);                      \
    }                                                                     \
  }                                                                       \
}
#else
#define MAYBE_POST_FIELD_ACCESS() {}
#define MAYBE_POST_FIELD_MODIFICATION() {}
#endif /* VM_JVMTI */

// Map a TosState to the distance between the matching _fast_Xgetfield
// bytecode and _fast_agetfield (the base of the fast getfield family);
// used when quickening field-access bytecodes.
static inline int tosstate_to_bc_offset(TosState tos) {
  switch (tos) {
    case btos: return Bytecodes::_fast_bgetfield - Bytecodes::_fast_agetfield;
    case ctos: return Bytecodes::_fast_cgetfield - Bytecodes::_fast_agetfield;
    case stos: return Bytecodes::_fast_sgetfield - Bytecodes::_fast_agetfield;
    case itos: return Bytecodes::_fast_igetfield - Bytecodes::_fast_agetfield;
    case ltos: return Bytecodes::_fast_lgetfield - Bytecodes::_fast_agetfield;
    case ftos: return Bytecodes::_fast_fgetfield - Bytecodes::_fast_agetfield;
    case dtos: return Bytecodes::_fast_dgetfield - Bytecodes::_fast_agetfield;
    case atos: return Bytecodes::_fast_agetfield - Bytecodes::_fast_agetfield;
    default: ShouldNotReachHere();
  }
  return ilgl;
}

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks is used if JVMTI is enabled.
503 * 504 */ 505 #if defined(VM_JVMTI) 506 void 507 BytecodeInterpreter::runWithChecks(interpreterState istate) { 508 #else 509 void 510 BytecodeInterpreter::run(interpreterState istate) { 511 #endif 512 513 // In order to simplify some tests based on switches set at runtime 514 // we invoke the interpreter a single time after switches are enabled 515 // and set simpler to to test variables rather than method calls or complex 516 // boolean expressions. 517 518 static int initialized = 0; 519 static int checkit = 0; 520 static intptr_t* c_addr = NULL; 521 static intptr_t c_value; 522 523 if (checkit && *c_addr != c_value) { 524 os::breakpoint(); 525 } 526 #ifdef VM_JVMTI 527 static bool _jvmti_interp_events = 0; 528 #endif 529 530 static int _compiling; // (UseCompiler || CountCompiledCalls) 531 532 #ifdef ASSERT 533 if (istate->_msg != initialize) { 534 assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); 535 #ifndef SHARK 536 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong")); 537 #endif // !SHARK 538 } 539 // Verify linkages. 540 interpreterState l = istate; 541 do { 542 assert(l == l->_self_link, "bad link"); 543 l = l->_prev_link; 544 } while (l != NULL); 545 // Screwups with stack management usually cause us to overwrite istate 546 // save a copy so we can verify it. 
547 interpreterState orig = istate; 548 #endif 549 550 static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier 551 552 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ 553 register address pc = istate->bcp(); 554 register jubyte opcode; 555 register intptr_t* locals = istate->locals(); 556 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() 557 #ifdef LOTS_OF_REGS 558 register JavaThread* THREAD = istate->thread(); 559 register volatile jbyte* BYTE_MAP_BASE = _byte_map_base; 560 #else 561 #undef THREAD 562 #define THREAD istate->thread() 563 #undef BYTE_MAP_BASE 564 #define BYTE_MAP_BASE _byte_map_base 565 #endif 566 567 #ifdef USELABELS 568 const static void* const opclabels_data[256] = { 569 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, 570 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, 571 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, 572 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, 573 574 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, 575 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, 576 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, 577 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, 578 579 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, 580 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, 581 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, 582 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, 583 584 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, 585 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, 586 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, 587 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, 588 589 /* 
0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, 590 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, 591 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, 592 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, 593 594 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, 595 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, 596 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, 597 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, 598 599 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, 600 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, 601 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, 602 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, 603 604 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, 605 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, 606 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, 607 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, 608 609 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, 610 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, 611 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, 612 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, 613 614 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, 615 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, 616 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, 617 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, 618 619 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, 620 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, 621 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch, 622 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, 623 624 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, 625 /* 0xB4 */ &&opc_getfield, 
&&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, 626 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, 627 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, 628 629 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, 630 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, 631 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_fast_agetfield, 632 /* 0xCC */ &&opc_fast_bgetfield, &&opc_fast_cgetfield, &&opc_fast_dgetfield, &&opc_fast_fgetfield, 633 634 /* 0xD0 */ &&opc_fast_igetfield, &&opc_fast_lgetfield, &&opc_fast_sgetfield, &&opc_fast_aputfield, 635 /* 0xD4 */ &&opc_fast_bputfield, &&opc_fast_cputfield, &&opc_fast_dputfield, &&opc_fast_fputfield, 636 /* 0xD8 */ &&opc_fast_iputfield, &&opc_fast_lputfield, &&opc_fast_sputfield, &&opc_fast_aload_0, 637 /* 0xDC */ &&opc_fast_iaccess_0, &&opc_fast_aaccess_0, &&opc_fast_faccess_0, &&opc_fast_iload, 638 639 /* 0xE0 */ &&opc_fast_iload2, &&opc_fast_icaload, &&opc_fast_invokevfinal, &&opc_default, 640 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, 641 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, 642 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 643 644 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 645 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 646 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 647 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default 648 }; 649 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; 650 #endif /* USELABELS */ 651 652 #ifdef ASSERT 653 // this will trigger a VERIFY_OOP on entry 654 if (istate->msg() != initialize && ! 
METHOD->is_static()) { 655 oop rcvr = LOCALS_OBJECT(0); 656 VERIFY_OOP(rcvr); 657 } 658 #endif 659 // #define HACK 660 #ifdef HACK 661 bool interesting = false; 662 #endif // HACK 663 664 /* QQQ this should be a stack method so we don't know actual direction */ 665 guarantee(istate->msg() == initialize || 666 topOfStack >= istate->stack_limit() && 667 topOfStack < istate->stack_base(), 668 "Stack top out of range"); 669 670 switch (istate->msg()) { 671 case initialize: { 672 if (initialized++) ShouldNotReachHere(); // Only one initialize call 673 _compiling = (UseCompiler || CountCompiledCalls); 674 #ifdef VM_JVMTI 675 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 676 #endif 677 BarrierSet* bs = Universe::heap()->barrier_set(); 678 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); 679 _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base); 680 return; 681 } 682 break; 683 case method_entry: { 684 THREAD->set_do_not_unlock(); 685 // count invocations 686 assert(initialized, "Interpreter not initialized"); 687 if (_compiling) { 688 if (ProfileInterpreter) { 689 METHOD->increment_interpreter_invocation_count(); 690 } 691 INCR_INVOCATION_COUNT; 692 if (INVOCATION_COUNT->reached_InvocationLimit()) { 693 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 694 695 // We no longer retry on a counter overflow 696 697 // istate->set_msg(retry_method); 698 // THREAD->clr_do_not_unlock(); 699 // return; 700 } 701 SAFEPOINT; 702 } 703 704 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 705 // initialize 706 os::breakpoint(); 707 } 708 709 #ifdef HACK 710 { 711 ResourceMark rm; 712 char *method_name = istate->method()->name_and_sig_as_C_string(); 713 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 714 tty->print_cr("entering: depth %d bci: %d", 715 (istate->_stack_base - istate->_stack), 716 istate->_bcp - 
istate->_method->code_base()); 717 interesting = true; 718 } 719 } 720 #endif // HACK 721 722 723 // lock method if synchronized 724 if (METHOD->is_synchronized()) { 725 // oop rcvr = locals[0].j.r; 726 oop rcvr; 727 if (METHOD->is_static()) { 728 rcvr = METHOD->constants()->pool_holder()->java_mirror(); 729 } else { 730 rcvr = LOCALS_OBJECT(0); 731 VERIFY_OOP(rcvr); 732 } 733 // The initial monitor is ours for the taking 734 BasicObjectLock* mon = &istate->monitor_base()[-1]; 735 oop monobj = mon->obj(); 736 assert(mon->obj() == rcvr, "method monitor mis-initialized"); 737 738 bool success = UseBiasedLocking; 739 if (UseBiasedLocking) { 740 markOop mark = rcvr->mark(); 741 if (mark->has_bias_pattern()) { 742 // The bias pattern is present in the object's header. Need to check 743 // whether the bias owner and the epoch are both still current. 744 intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark; 745 xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx; 746 intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place)); 747 if (yy != 0 ) { 748 // At this point we know that the header has the bias pattern and 749 // that we are not the bias owner in the current epoch. We need to 750 // figure out more details about the state of the header in order to 751 // know what operations can be legally performed on the object's 752 // header. 753 754 // If the low three bits in the xor result aren't clear, that means 755 // the prototype header is no longer biased and we have to revoke 756 // the bias on this object. 757 758 if (yy & markOopDesc::biased_lock_mask_in_place == 0 ) { 759 // Biasing is still enabled for this data type. See whether the 760 // epoch of the current bias is still valid, meaning that the epoch 761 // bits of the mark word are equal to the epoch bits of the 762 // prototype header. (Note that the prototype header's epoch bits 763 // only change at a safepoint.) If not, attempt to rebias the object 764 // toward the current thread. 
Note that we must be absolutely sure 765 // that the current epoch is invalid in order to do this because 766 // otherwise the manipulations it performs on the mark word are 767 // illegal. 768 if (yy & markOopDesc::epoch_mask_in_place == 0) { 769 // The epoch of the current bias is still valid but we know nothing 770 // about the owner; it might be set or it might be clear. Try to 771 // acquire the bias of the object using an atomic operation. If this 772 // fails we will go in to the runtime to revoke the object's bias. 773 // Note that we first construct the presumed unbiased header so we 774 // don't accidentally blow away another thread's valid bias. 775 intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place | 776 markOopDesc::age_mask_in_place | 777 markOopDesc::epoch_mask_in_place); 778 if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) { 779 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 780 } 781 } else { 782 try_rebias: 783 // At this point we know the epoch has expired, meaning that the 784 // current "bias owner", if any, is actually invalid. Under these 785 // circumstances _only_, we are allowed to use the current header's 786 // value as the comparison value when doing the cas to acquire the 787 // bias in the current epoch. In other words, we allow transfer of 788 // the bias from one thread to another directly in this situation. 
                  // Rebias path (continued): try to CAS this thread's id into the
                  // object's mark word, biasing the object toward the current thread.
                  // NOTE(review): 'xx' looks like leftover scratch — the CAS argument
                  // below recomputes the same value inline; confirm against upstream.
                  xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
                  if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->prototype_header(),
                                          (intptr_t*) rcvr->mark_addr(),
                                          (intptr_t) mark) != (intptr_t) mark) {
                    // CAS lost: another thread changed the mark first — take the
                    // full monitorenter slow path in the VM.
                    CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
                  }
                }
              } else {
                try_revoke_bias:
                // The prototype mark in the klass doesn't have the bias bit set any
                // more, indicating that objects of this data type are not supposed
                // to be biased any more. We are going to try to reset the mark of
                // this object to the prototype value and fall through to the
                // CAS-based locking scheme. Note that if our CAS fails, it means
                // that another thread raced us for the privilege of revoking the
                // bias of this particular object, so it's okay to continue in the
                // normal locking code.
                //
                // NOTE(review): 'xx' is computed but unused by the CAS below — looks
                // like leftover scratch; confirm against upstream.
                xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
                if (Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(),
                                        (intptr_t*) rcvr->mark_addr(),
                                        mark) == mark) {
                  // (*counters->revoked_lock_entry_count_addr())++;
                  success = false;
                }
              }
            }
          } else {
            cas_label:
            // Mark word has no bias pattern: fall through to the plain CAS-based
            // stack-locking scheme below.
            success = false;
          }
        }
        if (!success) {
          // Standard stack-lock: store a displaced, unlocked copy of the mark word
          // in the BasicLock, then CAS the lock's address into the object header.
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              // Recursive lock by this thread: NULL displaced header marks recursion.
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      ShouldNotReachHere();  // we don't return this.
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack) ,
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);

      markOop displaced = lockee->mark()->set_unlocked();
      entry->lock()->set_displaced_header(displaced);
      if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
        // Is it simple recursive case?
        if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
          entry->lock()->set_displaced_header(NULL);
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  // Main dispatch loop: fetch the opcode at pc and dispatch either via
  // computed-goto labels (USELABELS) or a plain switch.
  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // need at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using these labels avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                    \
          SET_STACK_ ## const_type(value, 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                \
      {                                                                \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                        \
      }
          OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
          OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
          OPC_CONST2_n(lconst_0, Zero, long, LONG);
          OPC_CONST2_n(lconst_1, One,  long, LONG);

          /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload): {
          // Attempt to rewrite iload, iload  -> fast_iload2
          //                    iload, caload -> fast_icaload
          // Normal iloads will be rewritten to fast_iload to avoid checking again.
          // Peek at the bytecode following this iload's operand to decide
          // which quickened form to install at *pc.
          Bytecodes::Code next = (Bytecodes::Code) *(pc + 2);
          switch (next) {
            case Bytecodes::_fast_iload:
              *(pc) = Bytecodes::_fast_iload2;
              break;
            case Bytecodes::_caload:
              *(pc) = Bytecodes::_fast_icaload;
              break;
            case Bytecodes::_iload:
              // Wait until rewritten to _fast_iload.
              break;
            default:
              // Last iload in a (potential) series, don't check again.
              *(pc) = Bytecodes::_fast_iload;
          }
          // Normal iload handling.
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
      }
      CASE(_fast_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_fast_iload2):
          // Quickened pair of iloads: both local slots pushed in one dispatch.
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          SET_STACK_SLOT(LOCALS_SLOT(pc[3]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_iload_##num):                                              \
      CASE(_fload_##num):                                              \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                        \
                                                                       \
      CASE(_lload_##num):                                              \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                        \
      CASE(_dload_##num):                                              \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

      CASE(_aload_0): {
          {
              /* Maybe rewrite if following bytecode is one of the supported _fast_Xgetfield bytecodes. */
              switch (*(pc + 1)) {
                case Bytecodes::_fast_agetfield:
                  *pc = Bytecodes::_fast_aaccess_0;
                  break;
                case Bytecodes::_fast_fgetfield:
                  *pc = Bytecodes::_fast_faccess_0;
                  break;
                case Bytecodes::_fast_igetfield:
                  *pc = Bytecodes::_fast_iaccess_0;
                  break;
                case Bytecodes::_getfield: {
                  /* Otherwise, do nothing here, wait until it gets rewritten to _fast_Xgetfield.
                   * Unfortunately, this punishes volatile field access, because it never gets
                   * rewritten. */
                  break;
                }
                default:
                  *pc = Bytecodes::_fast_aload_0;
                  break;
              }
          }
          VERIFY_OOP(LOCALS_OBJECT(0));
          SET_STACK_OBJECT(LOCALS_OBJECT(0), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
#undef  OPC_ALOAD_n
#define OPC_ALOAD_n(num)                                                \
      CASE(_aload_##num):                                              \
          VERIFY_OOP(LOCALS_OBJECT(num));                              \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_ALOAD_n(1);
          OPC_ALOAD_n(2);
          OPC_ALOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_wide): {
          // Wide-prefixed form: the local-variable index is a 16-bit operand
          // and the modified opcode follows the wide prefix at pc[1].
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];
          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // ret restores pc from the return-address stored in local 'reg'.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                             \
          astore(topOfStack, -1, locals, num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                       \
      CASE(_istore_##num):                                             \
      CASE(_fstore_##num):                                             \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                             \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                    \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                       \
      CASE(_lstore_##num):                                             \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

          // 'test' != 0 enables the divide-by-zero check (div/rem only).
#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                               \
          if (test && (STACK_INT(-1) == 0)) {                          \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(),\
                            "/ by zero");                              \
          }                                                            \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                   \
                                      STACK_INT(-1)),                  \
                                      -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                      \
      CASE(_l##opcname):                                               \
      {                                                                \
          if (test) {                                                  \
            jlong l1 = STACK_LONG(-1);                                 \
            if (VMlongEqz(l1)) {                                       \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(),\
                            "/ by long zero");                         \
            }                                                          \
          }                                                            \
          /* First long at (-1,-2) next long at (-3,-4) */             \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                \
                                        STACK_LONG(-1)),               \
                                        -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                      \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                               \
      CASE(_d##opcname): {                                             \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),          \
                                            STACK_DOUBLE(-1)),         \
                                            -3);                       \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                      \
      }                                                                \
      CASE(_f##opcname):                                               \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),             \
                                          STACK_FLOAT(-1)),            \
                                          -2);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                               \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                   \
                                      STACK_INT(-1)),                  \
                                      -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                      \
      CASE(_l##opcname):                                               \
      {                                                                \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                \
                                        STACK_INT(-1)),                \
                                        -2);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                      \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

      /* negate the value on the top of the stack */

      CASE(_ineg):
          SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
          SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
          SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
          SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
          SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1);  // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1);  // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2);  // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2);  // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2);  // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1);  // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1);  // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i):       /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f):       /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l):       /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */

      // 'skip' is either the signed 16-bit branch offset (branch taken) or 3,
      // the length of the if-bytecode itself (fall through).
#define COMPARISON_OP(name, comparison)                                 \
      CASE(_if_icmp##name): {                                          \
          int skip = (STACK_INT(-2) comparison STACK_INT(-1))          \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;       \
          address branch_pc = pc;                                      \
          UPDATE_PC_AND_TOS(skip, -2);                                 \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                         \
          CONTINUE;                                                    \
      }                                                                \
      CASE(_if##name): {                                               \
          int skip = (STACK_INT(-1) comparison 0)                      \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;       \
          address branch_pc = pc;                                      \
          UPDATE_PC_AND_TOS(skip, -1);                                 \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                         \
          CONTINUE;                                                    \
      }

#define COMPARISON_OP2(name, comparison)                                \
      COMPARISON_OP(name, comparison)                                  \
      CASE(_if_acmp##name): {                                          \
          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))    \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;       \
          address branch_pc = pc;                                      \
          UPDATE_PC_AND_TOS(skip, -2);                                 \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                         \
          CONTINUE;                                                    \
      }

#define NULL_COMPARISON_NOT_OP(name)                                    \
      CASE(_if##name): {                                               \
          int skip = (!(STACK_OBJECT(-1) == NULL))                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;       \
          address branch_pc = pc;                                      \
          UPDATE_PC_AND_TOS(skip, -1);                                 \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                         \
          CONTINUE;                                                    \
      }

#define NULL_COMPARISON_OP(name)                                        \
      CASE(_if##name): {                                               \
          int skip = ((STACK_OBJECT(-1) == NULL))                      \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;       \
          address branch_pc = pc;                                      \
          UPDATE_PC_AND_TOS(skip, -1);                                 \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                         \
          CONTINUE;                                                    \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          // Unsigned compare handles both below-low and above-high in one test.
          skip = ((uint32_t) key > (uint32_t)(high - low))
                      ? Bytes::get_Java_u4((address)&lpc[0])
                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
          // Does this really need a full backedge check (osr?)
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
              lpc += 2;
              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
                  skip = Bytes::get_Java_u4((address)&lpc[1]);
                  break;
              }
          }
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          // Third argument selects the NaN result: -1 for fcmpl, +1 for fcmpg.
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4);  // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      CASE(_fast_icaload): {
          // Custom fast access for iload,caload pair.
          arrayOop arrObj = (arrayOop) STACK_OBJECT(-1);
          jint index = LOCALS_INT(pc[1]);
          CHECK_NULL(arrObj);
          if ((uint32_t) index >= (uint32_t) arrObj->length()) {
              char message[jintAsStringSize];
              sprintf(message, "%d", index);
              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
          }
          SET_STACK_INT(*(jchar *)(((address) arrObj->base(T_CHAR)) + index * sizeof(jchar)), -1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 0);
      }
      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
      // Unsigned compare folds the index<0 and index>=length checks into one.
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint     index  = STACK_INT(arrayOff + 1);                               \
      char message[jintAsStringSize];                                          \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          sprintf(message, "%d", index);                                       \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message);                                              \
      }

      /* 32-bit loads. These handle conversion from < 32-bit types */
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
      {                                                                               \
          ARRAY_INTRO(-2);                                                            \
          extra;                                                                      \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2);                                                       \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
      }

      /* 64-bit loads */
#define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
      {                                                                                    \
          ARRAY_INTRO(-2);                                                                 \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          extra;                                                                           \
          UPDATE_PC_AND_CONTINUE(1);                                                       \
      }

      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      CASE(_aaload):
          ARRAY_LOADTO32(T_OBJECT, oop, INTPTR_FORMAT, STACK_OBJECT, 0);
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      /* 32-bit stores. These handle conversion to < 32-bit types */
#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
      {                                                                              \
          ARRAY_INTRO(-3);                                                           \
          extra;                                                                     \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
      }

      /* 64-bit stores */
#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
      {                                                                              \
          ARRAY_INTRO(-4);                                                           \
          extra;                                                                     \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
      }

      CASE(_iastore):
          ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_fastore):
          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      /*
       * This one looks different because of the assignability check
       */
      CASE(_aastore): {
          oop rhsObject = STACK_OBJECT(-1);
          VERIFY_OOP(rhsObject);
          ARRAY_INTRO( -3);
          // arrObj, index are set
          if (rhsObject != NULL) {
              /* Check assignability of rhsObject into arrObj */
              Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass)
              Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
              //
              // Check for compatibility. This check must not GC!!
              // Seems way more expensive now that we must dispatch
              //
              if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is...
                  VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
              }
          }
          oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
          // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
          *elem_loc = rhsObject;
          // Mark the card
          OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore):
          ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_castore):
          ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_sastore):
          ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_lastore):
          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_dastore):
          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);

      CASE(_arraylength):
      {
          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
          CHECK_NULL(ary);
          SET_STACK_INT(ary->length(), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* monitorenter and monitorexit for locking/unlocking an object */

      CASE(_monitorenter): {
          oop lockee = STACK_OBJECT(-1);
          // dereferencing lockee ought to provoke implicit null check
          CHECK_NULL(lockee);
          // find a free monitor or one already allocated for this object
          // if we find a matching object then we need a new monitor
          // since this is recursive enter
          BasicObjectLock* limit = istate->monitor_base();
          BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
          BasicObjectLock* entry = NULL;
          while (most_recent != limit ) {
              if (most_recent->obj() == NULL) entry = most_recent;
              else if (most_recent->obj() == lockee) break;
              most_recent++;
          }
          if (entry != NULL) {
              entry->set_obj(lockee);
              // Stack-lock: displace the mark word into the BasicLock and CAS
              // the lock's address into the object header.
              markOop displaced = lockee->mark()->set_unlocked();
              entry->lock()->set_displaced_header(displaced);
              if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
                  // Is it simple recursive case?
                  if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                      entry->lock()->set_displaced_header(NULL);
                  } else {
                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
                  }
              }
              UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
          } else {
              // No free monitor slot: ask the frame manager for more, then
              // re-execute this bytecode (pc is not advanced).
              istate->set_msg(more_monitors);
              UPDATE_PC_AND_RETURN(0); // Re-execute
          }
      }

      CASE(_monitorexit): {
          oop lockee = STACK_OBJECT(-1);
          CHECK_NULL(lockee);
          // dereferencing lockee ought to provoke implicit null check
          // find our monitor slot
          BasicObjectLock* limit = istate->monitor_base();
          BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
          while (most_recent != limit ) {
              if ((most_recent)->obj() == lockee) {
                  BasicLock* lock = most_recent->lock();
                  markOop header = lock->displaced_header();
                  most_recent->set_obj(NULL);
                  // If it isn't recursive we either must swap old header or call the runtime
                  if (header != NULL) {
                      if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                          // restore object for the slow case
                          most_recent->set_obj(lockee);
                          CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
                      }
                  }
                  UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
              }
              most_recent++;
          }
          // Need to throw illegal monitor state exception
          CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
          ShouldNotReachHere();
      }

      /* All of the non-quick opcodes. */

      /* Set clobbersCpIndex true if the quickened opcode clobbers the
       *  constant pool index in the instruction.
       */
      CASE(_getfield):
      CASE(_getstatic):
        {
          u2 index;
          ConstantPoolCacheEntry* cache;
          index = Bytes::get_native_u2(pc+1);

          // QQQ Need to make this as inlined as possible. Probably need to
          // split all the bytecode cases out so c++ compiler has a chance
          // for constant prop to fold everything possible away.

          cache = cp->entry_at(index);
          if (! cache->is_resolved((Bytecodes::Code) opcode)) {
              CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code) opcode), handle_exception);
              cache = cp->entry_at(index);
          }

          MAYBE_POST_FIELD_ACCESS();

          oop obj;
          if ((Bytecodes::Code) opcode == Bytecodes::_getstatic) {
              // Static field: the "receiver" is the declaring class's mirror.
              Klass* k = cache->f1_as_klass();
              obj = k->java_mirror();
              MORE_STACK(1);  // Assume single slot push
          } else {
              obj = (oop) STACK_OBJECT(-1);
              CHECK_NULL(obj);

              // Check if we can rewrite non-volatile _getfield to one of the _fast_Xgetfield.
              if (! cache->is_volatile()) {
                  // Rewrite current BC to _fast_Xgetfield.
                  TosState tos_type = cache->flag_state();
                  *pc = Bytecodes::_fast_agetfield + tosstate_to_bc_offset(tos_type);
              }
          }

          //
          // Now store the result on the stack
          //
          TosState tos_type = cache->flag_state();
          int field_offset = cache->f2_as_index();
          if (cache->is_volatile()) {
              // Volatile reads use the acquire variants.
              switch (tos_type) {
                case atos:
                  VERIFY_OOP(obj->obj_field_acquire(field_offset));
                  SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
                  break;
                case itos:
                  SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
                  break;
                case ltos:
                  SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
                  MORE_STACK(1);
                  break;
                case btos:
                  SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
                  break;
                case ctos:
                  SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
                  break;
                case stos:
                  SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
                  break;
                case ftos:
                  SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
                  break;
                case dtos:
                  SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
                  MORE_STACK(1);
                  break;
                default:
                  ShouldNotReachHere();
              }
          } else {
              switch (tos_type) {
                case atos:
                  VERIFY_OOP(obj->obj_field(field_offset));
                  SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
                  break;
                case itos:
                  SET_STACK_INT(obj->int_field(field_offset), -1);
                  break;
                case ltos:
                  SET_STACK_LONG(obj->long_field(field_offset), 0);
                  MORE_STACK(1);
                  break;
                case btos:
                  SET_STACK_INT(obj->byte_field(field_offset), -1);
                  break;
                case ctos:
                  SET_STACK_INT(obj->char_field(field_offset), -1);
                  break;
                case stos:
                  SET_STACK_INT(obj->short_field(field_offset), -1);
                  break;
                case ftos:
                  SET_STACK_FLOAT(obj->float_field(field_offset), -1);
                  break;
                case dtos:
                  SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
                  MORE_STACK(1);
                  break;
                default:
                  ShouldNotReachHere();
              }
          }

          UPDATE_PC_AND_CONTINUE(3);
        }

      CASE(_putfield):
      CASE(_putstatic):
        {
          u2 index = Bytes::get_native_u2(pc+1);
          ConstantPoolCacheEntry* cache = cp->entry_at(index);
          if (!cache->is_resolved((Bytecodes::Code)opcode)) {
              CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
                      handle_exception);
              cache = cp->entry_at(index);
          }

          MAYBE_POST_FIELD_MODIFICATION();

          // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
          // out so c++ compiler has a chance for constant prop to fold everything possible away.
1996 1997 oop obj; 1998 int count; 1999 TosState tos_type = cache->flag_state(); 2000 2001 count = -1; 2002 if (tos_type == ltos || tos_type == dtos) { 2003 --count; 2004 } 2005 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2006 Klass* k = cache->f1_as_klass(); 2007 obj = k->java_mirror(); 2008 } else { 2009 --count; 2010 obj = (oop) STACK_OBJECT(count); 2011 CHECK_NULL(obj); 2012 2013 // Check if we can rewrite non-volatile _putfield to one of the _fast_Xputfield. 2014 if (! cache->is_volatile()) { 2015 // Rewrite current BC to _fast_Xputfield. 2016 TosState tos_type = cache->flag_state(); 2017 *pc = Bytecodes::_fast_aputfield + tosstate_to_bc_offset(tos_type); 2018 } 2019 } 2020 2021 // 2022 // Now store the result 2023 // 2024 int field_offset = cache->f2_as_index(); 2025 if (cache->is_volatile()) { 2026 switch (tos_type) { 2027 case itos: 2028 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2029 break; 2030 case atos: 2031 VERIFY_OOP(STACK_OBJECT(-1)); 2032 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2033 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0); 2034 break; 2035 case btos: 2036 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2037 break; 2038 case ltos: 2039 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2040 break; 2041 case ctos: 2042 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2043 break; 2044 case stos: 2045 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2046 break; 2047 case ftos: 2048 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2049 break; 2050 case dtos: 2051 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2052 break; 2053 default: 2054 ShouldNotReachHere(); 2055 } 2056 OrderAccess::storeload(); 2057 } else { 2058 switch (tos_type) { 2059 case itos: 2060 obj->int_field_put(field_offset, STACK_INT(-1)); 2061 break; 2062 case atos: 2063 VERIFY_OOP(STACK_OBJECT(-1)); 2064 
obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2065 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0); 2066 break; 2067 case btos: 2068 obj->byte_field_put(field_offset, STACK_INT(-1)); 2069 break; 2070 case ltos: 2071 obj->long_field_put(field_offset, STACK_LONG(-1)); 2072 break; 2073 case ctos: 2074 obj->char_field_put(field_offset, STACK_INT(-1)); 2075 break; 2076 case stos: 2077 obj->short_field_put(field_offset, STACK_INT(-1)); 2078 break; 2079 case ftos: 2080 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2081 break; 2082 case dtos: 2083 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2084 break; 2085 default: 2086 ShouldNotReachHere(); 2087 } 2088 } 2089 2090 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2091 } 2092 2093 CASE(_new): { 2094 u2 index = Bytes::get_Java_u2(pc+1); 2095 ConstantPool* constants = istate->method()->constants(); 2096 if (!constants->tag_at(index).is_unresolved_klass()) { 2097 // Make sure klass is initialized and doesn't have a finalizer 2098 Klass* entry = constants->slot_at(index).get_klass(); 2099 assert(entry->is_klass(), "Should be resolved klass"); 2100 Klass* k_entry = (Klass*) entry; 2101 assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); 2102 InstanceKlass* ik = (InstanceKlass*) k_entry; 2103 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2104 size_t obj_size = ik->size_helper(); 2105 oop result = NULL; 2106 // If the TLAB isn't pre-zeroed then we'll have to do it 2107 bool need_zero = !ZeroTLAB; 2108 if (UseTLAB) { 2109 result = (oop) THREAD->tlab().allocate(obj_size); 2110 } 2111 if (result == NULL) { 2112 need_zero = true; 2113 // Try allocate in shared eden 2114 retry: 2115 HeapWord* compare_to = *Universe::heap()->top_addr(); 2116 HeapWord* new_top = compare_to + obj_size; 2117 if (new_top <= *Universe::heap()->end_addr()) { 2118 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2119 goto 
retry; 2120 } 2121 result = (oop) compare_to; 2122 } 2123 } 2124 if (result != NULL) { 2125 // Initialize object (if nonzero size and need) and then the header 2126 if (need_zero ) { 2127 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2128 obj_size -= sizeof(oopDesc) / oopSize; 2129 if (obj_size > 0 ) { 2130 memset(to_zero, 0, obj_size * HeapWordSize); 2131 } 2132 } 2133 if (UseBiasedLocking) { 2134 result->set_mark(ik->prototype_header()); 2135 } else { 2136 result->set_mark(markOopDesc::prototype()); 2137 } 2138 result->set_klass_gap(0); 2139 result->set_klass(k_entry); 2140 SET_STACK_OBJECT(result, 0); 2141 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2142 } 2143 } 2144 } 2145 // Slow case allocation 2146 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2147 handle_exception); 2148 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2149 THREAD->set_vm_result(NULL); 2150 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2151 } 2152 CASE(_anewarray): { 2153 u2 index = Bytes::get_Java_u2(pc+1); 2154 jint size = STACK_INT(-1); 2155 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2156 handle_exception); 2157 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2158 THREAD->set_vm_result(NULL); 2159 UPDATE_PC_AND_CONTINUE(3); 2160 } 2161 CASE(_multianewarray): { 2162 jint dims = *(pc+3); 2163 jint size = STACK_INT(-1); 2164 // stack grows down, dimensions are up! 
2165 jint *dimarray = 2166 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2167 Interpreter::stackElementWords-1]; 2168 //adjust pointer to start of stack element 2169 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2170 handle_exception); 2171 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2172 THREAD->set_vm_result(NULL); 2173 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2174 } 2175 CASE(_checkcast): 2176 if (STACK_OBJECT(-1) != NULL) { 2177 VERIFY_OOP(STACK_OBJECT(-1)); 2178 u2 index = Bytes::get_Java_u2(pc+1); 2179 if (ProfileInterpreter) { 2180 // needs Profile_checkcast QQQ 2181 ShouldNotReachHere(); 2182 } 2183 // Constant pool may have actual klass or unresolved klass. If it is 2184 // unresolved we must resolve it 2185 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2186 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2187 } 2188 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2189 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx 2190 // 2191 // Check for compatibilty. This check must not GC!! 2192 // Seems way more expensive now that we must dispatch 2193 // 2194 if (objKlassOop != klassOf && 2195 !objKlassOop->is_subtype_of(klassOf)) { 2196 ResourceMark rm(THREAD); 2197 const char* objName = objKlassOop->external_name(); 2198 const char* klassName = klassOf->external_name(); 2199 char* message = SharedRuntime::generate_class_cast_message( 2200 objName, klassName); 2201 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message); 2202 } 2203 } else { 2204 if (UncommonNullCast) { 2205 // istate->method()->set_null_cast_seen(); 2206 // [RGV] Not sure what to do here! 
2207 2208 } 2209 } 2210 UPDATE_PC_AND_CONTINUE(3); 2211 2212 CASE(_instanceof): 2213 if (STACK_OBJECT(-1) == NULL) { 2214 SET_STACK_INT(0, -1); 2215 } else { 2216 VERIFY_OOP(STACK_OBJECT(-1)); 2217 u2 index = Bytes::get_Java_u2(pc+1); 2218 // Constant pool may have actual klass or unresolved klass. If it is 2219 // unresolved we must resolve it 2220 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2221 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2222 } 2223 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2224 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); 2225 // 2226 // Check for compatibilty. This check must not GC!! 2227 // Seems way more expensive now that we must dispatch 2228 // 2229 if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) { 2230 SET_STACK_INT(1, -1); 2231 } else { 2232 SET_STACK_INT(0, -1); 2233 } 2234 } 2235 UPDATE_PC_AND_CONTINUE(3); 2236 2237 CASE(_ldc_w): 2238 CASE(_ldc): 2239 { 2240 u2 index; 2241 bool wide = false; 2242 int incr = 2; // frequent case 2243 if (opcode == Bytecodes::_ldc) { 2244 index = pc[1]; 2245 } else { 2246 index = Bytes::get_Java_u2(pc+1); 2247 incr = 3; 2248 wide = true; 2249 } 2250 2251 ConstantPool* constants = METHOD->constants(); 2252 switch (constants->tag_at(index).value()) { 2253 case JVM_CONSTANT_Integer: 2254 SET_STACK_INT(constants->int_at(index), 0); 2255 break; 2256 2257 case JVM_CONSTANT_Float: 2258 SET_STACK_FLOAT(constants->float_at(index), 0); 2259 break; 2260 2261 case JVM_CONSTANT_String: 2262 { 2263 oop result = constants->resolved_references()->obj_at(index); 2264 if (result == NULL) { 2265 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2266 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2267 THREAD->set_vm_result(NULL); 2268 } else { 2269 VERIFY_OOP(result); 2270 SET_STACK_OBJECT(result, 0); 2271 } 2272 break; 2273 } 2274 2275 case JVM_CONSTANT_Class: 2276 
VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2277 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2278 break; 2279 2280 case JVM_CONSTANT_UnresolvedClass: 2281 case JVM_CONSTANT_UnresolvedClassInError: 2282 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2283 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2284 THREAD->set_vm_result(NULL); 2285 break; 2286 2287 default: ShouldNotReachHere(); 2288 } 2289 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2290 } 2291 2292 CASE(_ldc2_w): 2293 { 2294 u2 index = Bytes::get_Java_u2(pc+1); 2295 2296 ConstantPool* constants = METHOD->constants(); 2297 switch (constants->tag_at(index).value()) { 2298 2299 case JVM_CONSTANT_Long: 2300 SET_STACK_LONG(constants->long_at(index), 1); 2301 break; 2302 2303 case JVM_CONSTANT_Double: 2304 SET_STACK_DOUBLE(constants->double_at(index), 1); 2305 break; 2306 default: ShouldNotReachHere(); 2307 } 2308 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2309 } 2310 2311 CASE(_fast_aldc_w): 2312 CASE(_fast_aldc): { 2313 u2 index; 2314 int incr; 2315 if (opcode == Bytecodes::_fast_aldc) { 2316 index = pc[1]; 2317 incr = 2; 2318 } else { 2319 index = Bytes::get_native_u2(pc+1); 2320 incr = 3; 2321 } 2322 2323 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2324 // This kind of CP cache entry does not need to match the flags byte, because 2325 // there is a 1-1 relation between bytecode type and CP entry type. 
2326 ConstantPool* constants = METHOD->constants(); 2327 oop result = constants->resolved_references()->obj_at(index); 2328 if (result == NULL) { 2329 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2330 handle_exception); 2331 result = THREAD->vm_result(); 2332 } 2333 2334 VERIFY_OOP(result); 2335 SET_STACK_OBJECT(result, 0); 2336 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2337 } 2338 2339 CASE(_invokedynamic): { 2340 2341 if (!EnableInvokeDynamic) { 2342 // We should not encounter this bytecode if !EnableInvokeDynamic. 2343 // The verifier will stop it. However, if we get past the verifier, 2344 // this will stop the thread in a reasonable way, without crashing the JVM. 2345 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), 2346 handle_exception); 2347 ShouldNotReachHere(); 2348 } 2349 2350 u4 index = Bytes::get_native_u4(pc+1); 2351 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2352 2353 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2354 // This kind of CP cache entry does not need to match the flags byte, because 2355 // there is a 1-1 relation between bytecode type and CP entry type. 2356 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2357 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), 2358 handle_exception); 2359 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2360 } 2361 2362 Method* method = cache->f1_as_method(); 2363 VERIFY_OOP(method); 2364 2365 if (cache->has_appendix()) { 2366 ConstantPool* constants = METHOD->constants(); 2367 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2368 MORE_STACK(1); 2369 } 2370 2371 istate->set_msg(call_method); 2372 istate->set_callee(method); 2373 istate->set_callee_entry_point(method->from_interpreted_entry()); 2374 istate->set_bcp_advance(5); 2375 2376 UPDATE_PC_AND_RETURN(0); // I'll be back... 
2377 } 2378 2379 CASE(_invokehandle): { 2380 2381 if (!EnableInvokeDynamic) { 2382 ShouldNotReachHere(); 2383 } 2384 2385 u2 index = Bytes::get_native_u2(pc+1); 2386 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2387 2388 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2389 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), 2390 handle_exception); 2391 cache = cp->entry_at(index); 2392 } 2393 2394 Method* method = cache->f1_as_method(); 2395 2396 VERIFY_OOP(method); 2397 2398 if (cache->has_appendix()) { 2399 ConstantPool* constants = METHOD->constants(); 2400 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2401 MORE_STACK(1); 2402 } 2403 2404 istate->set_msg(call_method); 2405 istate->set_callee(method); 2406 istate->set_callee_entry_point(method->from_interpreted_entry()); 2407 istate->set_bcp_advance(3); 2408 2409 UPDATE_PC_AND_RETURN(0); // I'll be back... 2410 } 2411 2412 CASE(_invokeinterface): { 2413 u2 index = Bytes::get_native_u2(pc+1); 2414 2415 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2416 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2417 2418 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2419 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2420 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2421 handle_exception); 2422 cache = cp->entry_at(index); 2423 } 2424 2425 istate->set_msg(call_method); 2426 2427 // Special case of invokeinterface called for virtual method of 2428 // java.lang.Object. See cpCacheOop.cpp for details. 2429 // This code isn't produced by javac, but could be produced by 2430 // another compliant java compiler. 
2431 if (cache->is_forced_virtual()) { 2432 Method* callee; 2433 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2434 if (cache->is_vfinal()) { 2435 callee = cache->f2_as_vfinal_method(); 2436 } else { 2437 // get receiver 2438 int parms = cache->parameter_size(); 2439 // Same comments as invokevirtual apply here 2440 VERIFY_OOP(STACK_OBJECT(-parms)); 2441 InstanceKlass* rcvrKlass = (InstanceKlass*) 2442 STACK_OBJECT(-parms)->klass(); 2443 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2444 } 2445 istate->set_callee(callee); 2446 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2447 #ifdef VM_JVMTI 2448 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2449 istate->set_callee_entry_point(callee->interpreter_entry()); 2450 } 2451 #endif /* VM_JVMTI */ 2452 istate->set_bcp_advance(5); 2453 UPDATE_PC_AND_RETURN(0); // I'll be back... 2454 } 2455 2456 // this could definitely be cleaned up QQQ 2457 Method* callee; 2458 Klass* iclass = cache->f1_as_klass(); 2459 // InstanceKlass* interface = (InstanceKlass*) iclass; 2460 // get receiver 2461 int parms = cache->parameter_size(); 2462 oop rcvr = STACK_OBJECT(-parms); 2463 CHECK_NULL(rcvr); 2464 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2465 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2466 int i; 2467 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2468 if (ki->interface_klass() == iclass) break; 2469 } 2470 // If the interface isn't found, this class doesn't implement this 2471 // interface. The link resolver checks this but only for the first 2472 // time this interface is called. 
2473 if (i == int2->itable_length()) { 2474 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); 2475 } 2476 int mindex = cache->f2_as_index(); 2477 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2478 callee = im[mindex].method(); 2479 if (callee == NULL) { 2480 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); 2481 } 2482 2483 istate->set_callee(callee); 2484 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2485 #ifdef VM_JVMTI 2486 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2487 istate->set_callee_entry_point(callee->interpreter_entry()); 2488 } 2489 #endif /* VM_JVMTI */ 2490 istate->set_bcp_advance(5); 2491 UPDATE_PC_AND_RETURN(0); // I'll be back... 2492 } 2493 2494 CASE(_invokevirtual): 2495 CASE(_invokespecial): 2496 CASE(_invokestatic): { 2497 u2 index = Bytes::get_native_u2(pc+1); 2498 2499 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2500 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2501 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2502 2503 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2504 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2505 handle_exception); 2506 cache = cp->entry_at(index); 2507 } 2508 2509 istate->set_msg(call_method); 2510 { 2511 Method* callee; 2512 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2513 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2514 if (cache->is_vfinal()) { 2515 callee = cache->f2_as_vfinal_method(); 2516 // Rewrite to _fast_invokevfinal. 
2517 *pc = Bytecodes::_fast_invokevfinal; 2518 } 2519 else { 2520 // get receiver 2521 int parms = cache->parameter_size(); 2522 // this works but needs a resourcemark and seems to create a vtable on every call: 2523 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2524 // 2525 // this fails with an assert 2526 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2527 // but this works 2528 VERIFY_OOP(STACK_OBJECT(-parms)); 2529 InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass(); 2530 /* 2531 Executing this code in java.lang.String: 2532 public String(char value[]) { 2533 this.count = value.length; 2534 this.value = (char[])value.clone(); 2535 } 2536 2537 a find on rcvr->klass() reports: 2538 {type array char}{type array class} 2539 - klass: {other class} 2540 2541 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2542 because rcvr->klass()->oop_is_instance() == 0 2543 However it seems to have a vtable in the right location. Huh? 2544 2545 */ 2546 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2547 } 2548 } else { 2549 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2550 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2551 } 2552 callee = cache->f1_as_method(); 2553 } 2554 2555 istate->set_callee(callee); 2556 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2557 #ifdef VM_JVMTI 2558 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2559 istate->set_callee_entry_point(callee->interpreter_entry()); 2560 } 2561 #endif /* VM_JVMTI */ 2562 istate->set_bcp_advance(3); 2563 UPDATE_PC_AND_RETURN(0); // I'll be back... 2564 } 2565 } 2566 2567 /* Allocate memory for a new java object. 
*/ 2568 2569 CASE(_newarray): { 2570 BasicType atype = (BasicType) *(pc+1); 2571 jint size = STACK_INT(-1); 2572 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2573 handle_exception); 2574 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2575 THREAD->set_vm_result(NULL); 2576 2577 UPDATE_PC_AND_CONTINUE(2); 2578 } 2579 2580 /* Throw an exception. */ 2581 2582 CASE(_athrow): { 2583 oop except_oop = STACK_OBJECT(-1); 2584 CHECK_NULL(except_oop); 2585 // set pending_exception so we use common code 2586 THREAD->set_pending_exception(except_oop, NULL, 0); 2587 goto handle_exception; 2588 } 2589 2590 /* goto and jsr. They are exactly the same except jsr pushes 2591 * the address of the next instruction first. 2592 */ 2593 2594 CASE(_jsr): { 2595 /* push bytecode index on stack */ 2596 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2597 MORE_STACK(1); 2598 /* FALL THROUGH */ 2599 } 2600 2601 CASE(_goto): 2602 { 2603 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2604 address branch_pc = pc; 2605 UPDATE_PC(offset); 2606 DO_BACKEDGE_CHECKS(offset, branch_pc); 2607 CONTINUE; 2608 } 2609 2610 CASE(_jsr_w): { 2611 /* push return address on the stack */ 2612 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2613 MORE_STACK(1); 2614 /* FALL THROUGH */ 2615 } 2616 2617 CASE(_goto_w): 2618 { 2619 int32_t offset = Bytes::get_Java_u4(pc + 1); 2620 address branch_pc = pc; 2621 UPDATE_PC(offset); 2622 DO_BACKEDGE_CHECKS(offset, branch_pc); 2623 CONTINUE; 2624 } 2625 2626 /* return from a jsr or jsr_w */ 2627 2628 CASE(_ret): { 2629 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2630 UPDATE_PC_AND_CONTINUE(0); 2631 } 2632 2633 /* debugger breakpoint */ 2634 2635 CASE(_breakpoint): { 2636 Bytecodes::Code original_bytecode; 2637 DECACHE_STATE(); 2638 SET_LAST_JAVA_FRAME(); 2639 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2640 METHOD, pc); 2641 
RESET_LAST_JAVA_FRAME(); 2642 CACHE_STATE(); 2643 if (THREAD->has_pending_exception()) goto handle_exception; 2644 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2645 handle_exception); 2646 2647 opcode = (jubyte)original_bytecode; 2648 goto opcode_switch; 2649 } 2650 CASE(_fast_agetfield): { 2651 u2 index = Bytes::get_native_u2(pc+1); 2652 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2653 int field_offset = cache->f2_as_index(); 2654 2655 MAYBE_POST_FIELD_ACCESS(); 2656 2657 oop obj = (oop) STACK_OBJECT(-1); 2658 CHECK_NULL(obj); 2659 2660 VERIFY_OOP(obj->obj_field(field_offset)); 2661 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2662 2663 UPDATE_PC_AND_CONTINUE(3); 2664 } 2665 CASE(_fast_bgetfield): { 2666 u2 index = Bytes::get_native_u2(pc+1); 2667 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2668 int field_offset = cache->f2_as_index(); 2669 2670 MAYBE_POST_FIELD_ACCESS(); 2671 2672 oop obj = (oop) STACK_OBJECT(-1); 2673 CHECK_NULL(obj); 2674 2675 SET_STACK_INT(obj->byte_field(field_offset), -1); 2676 2677 UPDATE_PC_AND_CONTINUE(3); 2678 } 2679 CASE(_fast_cgetfield): { 2680 u2 index = Bytes::get_native_u2(pc+1); 2681 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2682 int field_offset = cache->f2_as_index(); 2683 2684 MAYBE_POST_FIELD_ACCESS(); 2685 2686 oop obj = (oop) STACK_OBJECT(-1); 2687 CHECK_NULL(obj); 2688 2689 SET_STACK_INT(obj->char_field(field_offset), -1); 2690 2691 UPDATE_PC_AND_CONTINUE(3); 2692 } 2693 CASE(_fast_dgetfield): { 2694 u2 index = Bytes::get_native_u2(pc+1); 2695 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2696 int field_offset = cache->f2_as_index(); 2697 2698 MAYBE_POST_FIELD_ACCESS(); 2699 2700 oop obj = (oop) STACK_OBJECT(-1); 2701 CHECK_NULL(obj); 2702 2703 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2704 MORE_STACK(1); 2705 2706 UPDATE_PC_AND_CONTINUE(3); 2707 } 2708 CASE(_fast_fgetfield): { 2709 u2 index = Bytes::get_native_u2(pc+1); 2710 ConstantPoolCacheEntry* cache 
= cp->entry_at(index); 2711 int field_offset = cache->f2_as_index(); 2712 2713 MAYBE_POST_FIELD_ACCESS(); 2714 2715 oop obj = (oop) STACK_OBJECT(-1); 2716 CHECK_NULL(obj); 2717 2718 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2719 2720 UPDATE_PC_AND_CONTINUE(3); 2721 } 2722 CASE(_fast_igetfield): { 2723 u2 index = Bytes::get_native_u2(pc+1); 2724 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2725 int field_offset = cache->f2_as_index(); 2726 2727 MAYBE_POST_FIELD_ACCESS(); 2728 2729 oop obj = (oop) STACK_OBJECT(-1); 2730 CHECK_NULL(obj); 2731 2732 SET_STACK_INT(obj->int_field(field_offset), -1); 2733 2734 UPDATE_PC_AND_CONTINUE(3); 2735 } 2736 CASE(_fast_lgetfield): { 2737 u2 index = Bytes::get_native_u2(pc+1); 2738 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2739 int field_offset = cache->f2_as_index(); 2740 2741 MAYBE_POST_FIELD_ACCESS(); 2742 2743 oop obj = (oop) STACK_OBJECT(-1); 2744 CHECK_NULL(obj); 2745 2746 SET_STACK_LONG(obj->long_field(field_offset), 0); 2747 MORE_STACK(1); 2748 2749 UPDATE_PC_AND_CONTINUE(3); 2750 } 2751 CASE(_fast_sgetfield): { 2752 u2 index = Bytes::get_native_u2(pc+1); 2753 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2754 int field_offset = cache->f2_as_index(); 2755 2756 MAYBE_POST_FIELD_ACCESS(); 2757 2758 oop obj = (oop) STACK_OBJECT(-1); 2759 CHECK_NULL(obj); 2760 2761 SET_STACK_INT(obj->short_field(field_offset), -1); 2762 2763 UPDATE_PC_AND_CONTINUE(3); 2764 } 2765 CASE(_fast_aputfield): { 2766 u2 index = Bytes::get_native_u2(pc+1); 2767 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2768 2769 oop obj = (oop) STACK_OBJECT(-2); 2770 CHECK_NULL(obj); 2771 2772 MAYBE_POST_FIELD_MODIFICATION(); 2773 2774 int field_offset = cache->f2_as_index(); 2775 VERIFY_OOP(STACK_OBJECT(-1)); 2776 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2777 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0); 2778 2779 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2780 } 2781 
CASE(_fast_bputfield): { 2782 u2 index = Bytes::get_native_u2(pc+1); 2783 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2784 2785 oop obj = (oop) STACK_OBJECT(-2); 2786 CHECK_NULL(obj); 2787 2788 MAYBE_POST_FIELD_MODIFICATION(); 2789 2790 int field_offset = cache->f2_as_index(); 2791 obj->byte_field_put(field_offset, STACK_INT(-1)); 2792 2793 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2794 } 2795 CASE(_fast_cputfield): { 2796 u2 index = Bytes::get_native_u2(pc+1); 2797 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2798 2799 oop obj = (oop) STACK_OBJECT(-2); 2800 CHECK_NULL(obj); 2801 2802 MAYBE_POST_FIELD_MODIFICATION(); 2803 2804 int field_offset = cache->f2_as_index(); 2805 obj->char_field_put(field_offset, STACK_INT(-1)); 2806 2807 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2808 } 2809 CASE(_fast_dputfield): { 2810 u2 index = Bytes::get_native_u2(pc+1); 2811 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2812 2813 oop obj = (oop) STACK_OBJECT(-3); 2814 CHECK_NULL(obj); 2815 2816 MAYBE_POST_FIELD_MODIFICATION(); 2817 2818 int field_offset = cache->f2_as_index(); 2819 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2820 2821 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3); 2822 } 2823 CASE(_fast_fputfield): { 2824 u2 index = Bytes::get_native_u2(pc+1); 2825 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2826 2827 oop obj = (oop) STACK_OBJECT(-2); 2828 CHECK_NULL(obj); 2829 2830 MAYBE_POST_FIELD_MODIFICATION(); 2831 2832 int field_offset = cache->f2_as_index(); 2833 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2834 2835 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2836 } 2837 CASE(_fast_iputfield): { 2838 u2 index = Bytes::get_native_u2(pc+1); 2839 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2840 2841 oop obj = (oop) STACK_OBJECT(-2); 2842 CHECK_NULL(obj); 2843 2844 MAYBE_POST_FIELD_MODIFICATION(); 2845 2846 int field_offset = cache->f2_as_index(); 2847 obj->int_field_put(field_offset, STACK_INT(-1)); 2848 2849 
UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2850 } 2851 CASE(_fast_lputfield): { 2852 u2 index = Bytes::get_native_u2(pc+1); 2853 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2854 2855 oop obj = (oop) STACK_OBJECT(-3); 2856 CHECK_NULL(obj); 2857 2858 MAYBE_POST_FIELD_MODIFICATION(); 2859 2860 int field_offset = cache->f2_as_index(); 2861 obj->long_field_put(field_offset, STACK_LONG(-1)); 2862 2863 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3); 2864 } 2865 CASE(_fast_sputfield): { 2866 u2 index = Bytes::get_native_u2(pc+1); 2867 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2868 2869 oop obj = (oop) STACK_OBJECT(-2); 2870 CHECK_NULL(obj); 2871 2872 MAYBE_POST_FIELD_MODIFICATION(); 2873 2874 int field_offset = cache->f2_as_index(); 2875 obj->short_field_put(field_offset, STACK_INT(-1)); 2876 2877 UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); 2878 } 2879 CASE(_fast_aload_0): { 2880 VERIFY_OOP(LOCALS_OBJECT(0)); 2881 SET_STACK_OBJECT(LOCALS_OBJECT(0), 0); 2882 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 2883 } 2884 CASE(_fast_aaccess_0): { 2885 u2 index = Bytes::get_native_u2(pc+2); 2886 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2887 int field_offset = cache->f2_as_index(); 2888 2889 VERIFY_OOP(LOCALS_OBJECT(0)); 2890 2891 MAYBE_POST_FIELD_ACCESS(); 2892 2893 oop obj = (oop) LOCALS_OBJECT(0); 2894 CHECK_NULL(obj); 2895 2896 VERIFY_OOP(obj->obj_field(field_offset)); 2897 SET_STACK_OBJECT(obj->obj_field(field_offset), 0); 2898 2899 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 2900 } 2901 CASE(_fast_faccess_0): { 2902 u2 index = Bytes::get_native_u2(pc+2); 2903 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2904 int field_offset = cache->f2_as_index(); 2905 2906 VERIFY_OOP(LOCALS_OBJECT(0)); 2907 2908 MAYBE_POST_FIELD_ACCESS(); 2909 2910 oop obj = (oop) LOCALS_OBJECT(0); 2911 CHECK_NULL(obj); 2912 2913 SET_STACK_INT(obj->int_field(field_offset), 0); 2914 2915 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 2916 } 2917 CASE(_fast_iaccess_0): { 2918 u2 index = 
Bytes::get_native_u2(pc+2); 2919 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2920 int field_offset = cache->f2_as_index(); 2921 2922 VERIFY_OOP(LOCALS_OBJECT(0)); 2923 2924 MAYBE_POST_FIELD_ACCESS(); 2925 2926 oop obj = (oop) LOCALS_OBJECT(0); 2927 CHECK_NULL(obj); 2928 2929 SET_STACK_FLOAT(obj->float_field(field_offset), 0); 2930 2931 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 2932 } 2933 CASE(_fast_invokevfinal): { 2934 u2 index = Bytes::get_native_u2(pc+1); 2935 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2936 2937 assert (cache->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting"); 2938 2939 istate->set_msg(call_method); 2940 2941 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2942 Method* callee = cache->f2_as_vfinal_method(); 2943 istate->set_callee(callee); 2944 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2945 #ifdef VM_JVMTI 2946 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2947 istate->set_callee_entry_point(callee->interpreter_entry()); 2948 } 2949 #endif /* VM_JVMTI */ 2950 istate->set_bcp_advance(3); 2951 UPDATE_PC_AND_RETURN(0); // I'll be back... 2952 } 2953 DEFAULT: 2954 fatal(err_msg("Unimplemented opcode %d = %s", opcode, 2955 Bytecodes::name((Bytecodes::Code)opcode))); 2956 goto finish; 2957 2958 } /* switch(opc) */ 2959 2960 2961 #ifdef USELABELS 2962 check_for_exception: 2963 #endif 2964 { 2965 if (!THREAD->has_pending_exception()) { 2966 CONTINUE; 2967 } 2968 /* We will be gcsafe soon, so flush our state. 
*/ 2969 DECACHE_PC(); 2970 goto handle_exception; 2971 } 2972 do_continue: ; 2973 2974 } /* while (1) interpreter loop */ 2975 2976 2977 // An exception exists in the thread state see whether this activation can handle it 2978 handle_exception: { 2979 2980 HandleMarkCleaner __hmc(THREAD); 2981 Handle except_oop(THREAD, THREAD->pending_exception()); 2982 // Prevent any subsequent HandleMarkCleaner in the VM 2983 // from freeing the except_oop handle. 2984 HandleMark __hm(THREAD); 2985 2986 THREAD->clear_pending_exception(); 2987 assert(except_oop(), "No exception to process"); 2988 intptr_t continuation_bci; 2989 // expression stack is emptied 2990 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2991 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2992 handle_exception); 2993 2994 except_oop = THREAD->vm_result(); 2995 THREAD->set_vm_result(NULL); 2996 if (continuation_bci >= 0) { 2997 // Place exception on top of stack 2998 SET_STACK_OBJECT(except_oop(), 0); 2999 MORE_STACK(1); 3000 pc = METHOD->code_base() + continuation_bci; 3001 if (TraceExceptions) { 3002 ttyLocker ttyl; 3003 ResourceMark rm; 3004 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); 3005 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 3006 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, 3007 pc - (intptr_t)METHOD->code_base(), 3008 continuation_bci, THREAD); 3009 } 3010 // for AbortVMOnException flag 3011 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 3012 goto run; 3013 } 3014 if (TraceExceptions) { 3015 ttyLocker ttyl; 3016 ResourceMark rm; 3017 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); 3018 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 3019 tty->print_cr(" at bci %d, unwinding for thread " 
INTPTR_FORMAT, 3020 pc - (intptr_t) METHOD->code_base(), 3021 THREAD); 3022 } 3023 // for AbortVMOnException flag 3024 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 3025 // No handler in this activation, unwind and try again 3026 THREAD->set_pending_exception(except_oop(), NULL, 0); 3027 goto handle_return; 3028 } /* handle_exception: */ 3029 3030 3031 3032 // Return from an interpreter invocation with the result of the interpretation 3033 // on the top of the Java Stack (or a pending exception) 3034 3035 handle_Pop_Frame: 3036 3037 // We don't really do anything special here except we must be aware 3038 // that we can get here without ever locking the method (if sync). 3039 // Also we skip the notification of the exit. 3040 3041 istate->set_msg(popping_frame); 3042 // Clear pending so while the pop is in process 3043 // we don't start another one if a call_vm is done. 3044 THREAD->clr_pop_frame_pending(); 3045 // Let interpreter (only) see the we're in the process of popping a frame 3046 THREAD->set_pop_frame_in_process(); 3047 3048 handle_return: 3049 { 3050 DECACHE_STATE(); 3051 3052 bool suppress_error = istate->msg() == popping_frame; 3053 bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error; 3054 Handle original_exception(THREAD, THREAD->pending_exception()); 3055 Handle illegal_state_oop(THREAD, NULL); 3056 3057 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 3058 // in any following VM entries from freeing our live handles, but illegal_state_oop 3059 // isn't really allocated yet and so doesn't become live until later and 3060 // in unpredicatable places. Instead we must protect the places where we enter the 3061 // VM. It would be much simpler (and safer) if we could allocate a real handle with 3062 // a NULL oop in it and then overwrite the oop later as needed. This isn't 3063 // unfortunately isn't possible. 
    THREAD->clear_pending_exception();

    //
    // As far as we are concerned we have returned. If we have a pending exception
    // that will be returned as this invocation's result. However if we get any
    // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
    // will be our final result (i.e. monitor exception trumps a pending exception).
    //

    // If we never locked the method (or really passed the point where we would have),
    // there is no need to unlock it (or look for other monitors), since that
    // could not have happened.

    if (THREAD->do_not_unlock()) {

      // Never locked, reset the flag now because obviously any caller must
      // have passed their point of locking for us to have gotten here.

      THREAD->clr_do_not_unlock();
    } else {
      // At this point we consider that we have returned. We now check that the
      // locks were properly block structured. If we find that they were not
      // used properly we will return with an illegal monitor exception.
      // The exception is checked by the caller not the callee since this
      // checking is considered to be part of the invocation and therefore
      // in the callers scope (JVM spec 8.13).
      //
      // Another weird thing to watch for is if the method was locked
      // recursively and then not exited properly. This means we must
      // examine all the entries in reverse time(and stack) order and
      // unlock as we find them. If we find the method monitor before
      // we are at the initial entry then we should throw an exception.
      // It is not clear the template based interpreter does this
      // correctly

      BasicObjectLock* base = istate->monitor_base();
      BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
      bool method_unlock_needed = METHOD->is_synchronized();
      // We know the initial monitor was used for the method don't check that
      // slot in the loop
      if (method_unlock_needed) base--;

      // Check all the monitors to see they are unlocked. Install exception if found to be locked.
      while (end < base) {
        oop lockee = end->obj();
        if (lockee != NULL) {
          // A non-NULL obj means this monitor slot is still locked: unlock it.
          BasicLock* lock = end->lock();
          markOop header = lock->displaced_header();
          end->set_obj(NULL);
          // If it isn't recursive we either must swap old header or call the runtime
          if (header != NULL) {
            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
              // restore object for the slow case
              end->set_obj(lockee);
              {
                // Prevent any HandleMarkCleaner from freeing our live handles
                HandleMark __hm(THREAD);
                CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
              }
            }
          }
          // One error is plenty
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        }
        end++;
      }
      // Unlock the method if needed
      if (method_unlock_needed) {
        if (base->obj() == NULL) {
          // The method is already unlocked this is not good.
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        } else {
          //
          // The initial monitor is always used for the method
          // However if that slot is no longer the oop for the method it was unlocked
          // and reused by something that wasn't unlocked!
          //
          // deopt can come in with rcvr dead because c2 knows
          // its value is preserved in the monitor. So we can't use locals[0] at all
          // and must use first monitor slot.
          //
          oop rcvr = base->obj();
          if (rcvr == NULL) {
            if (!suppress_error) {
              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
              illegal_state_oop = THREAD->pending_exception();
              THREAD->clear_pending_exception();
            }
          } else {
            BasicLock* lock = base->lock();
            markOop header = lock->displaced_header();
            base->set_obj(NULL);
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL) {
              if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
                // restore object for the slow case
                base->set_obj(rcvr);
                {
                  // Prevent any HandleMarkCleaner from freeing our live handles
                  HandleMark __hm(THREAD);
                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
                }
                if (THREAD->has_pending_exception()) {
                  if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
                  THREAD->clear_pending_exception();
                }
              }
            }
          }
        }
      }
    }

    //
    // Notify jvmti/jvmdi
    //
    // NOTE: we do not notify a method_exit if we have a pending exception,
    // including an exception we generate for unlocking checks. In the former
    // case, JVMDI has already been notified by our call for the exception handler
    // and in both cases as far as JVMDI is concerned we have already returned.
    // If we notify it again JVMDI will be all confused about how many frames
    // are still on the stack (4340444).
    //
    // NOTE Further! It turns out the the JVMTI spec in fact expects to see
    // method_exit events whenever we leave an activation unless it was done
    // for popframe. This is nothing like jvmdi. However we are passing the
    // tests at the moment (apparently because they are jvmdi based) so rather
    // than change this code and possibly fail tests we will leave it alone
    // (with this note) in anticipation of changing the vm and the tests
    // simultaneously.


    //
    suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;


#ifdef VM_JVMTI
    if (_jvmti_interp_events) {
      // Whenever JVMTI puts a thread in interp_only_mode, method
      // entry/exit events are sent for that thread to track stack depth.
      if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
        {
          // Prevent any HandleMarkCleaner from freeing our live handles
          HandleMark __hm(THREAD);
          CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
        }
      }
    }
#endif /* VM_JVMTI */

    //
    // See if we are returning any exception
    // A pending exception that was pending prior to a possible popping frame
    // overrides the popping frame.
    //
    assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
    if (illegal_state_oop() != NULL || original_exception() != NULL) {
      // inform the frame manager we have no result
      istate->set_msg(throwing_exception);
      if (illegal_state_oop() != NULL)
        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
      else
        THREAD->set_pending_exception(original_exception(), NULL, 0);
      istate->set_return_kind((Bytecodes::Code)opcode);
      UPDATE_PC_AND_RETURN(0);
    }

    if (istate->msg() == popping_frame) {
      // Make it simpler on the assembly code and set the message for the frame pop.
      // returns
      if (istate->prev() == NULL) {
        // We must be returning to a deoptimized frame (because popframe only happens between
        // two interpreted frames). We need to save the current arguments in C heap so that
        // the deoptimized frame when it restarts can copy the arguments to its expression
        // stack and re-execute the call. We also have to notify deoptimization that this
        // has occurred and to pick the preserved args copy them to the deoptimized frame's
        // java expression stack. Yuck.
        //
        THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
                                       LOCALS_SLOT(METHOD->size_of_parameters() - 1));
        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
      }
      THREAD->clr_pop_frame_in_process();
    }

    // Normal return
    // Advance the pc and return to frame manager
    istate->set_msg(return_from_method);
    istate->set_return_kind((Bytecodes::Code)opcode);
    UPDATE_PC_AND_RETURN(1);
  } /* handle_return: */

// This is really a fatal error return

finish:
  DECACHE_TOS();
  DECACHE_PC();

  return;
}

/*
 * All the code following this point is only produced once and is not present
 * in the JVMTI version of the interpreter
 */

#ifndef VM_JVMTI

// This constructor should only be used to construct the object to signal
// interpreter initialization. All other instances should be created by
// the frame manager.
BytecodeInterpreter::BytecodeInterpreter(messages msg) {
  if (msg != initialize) ShouldNotReachHere();
  _msg = msg;
  _self_link = this;
  _prev_link = NULL;
}

// Inline static functions for Java Stack and Local manipulation

// The implementations are platform dependent. We have to worry about alignment
// issues on some machines which can change on the same platform depending on
// whether it is an LP64 machine also.
3307 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3308 return (address) tos[Interpreter::expr_index_at(-offset)]; 3309 } 3310 3311 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3312 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3313 } 3314 3315 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3316 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3317 } 3318 3319 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3320 return (oop)tos [Interpreter::expr_index_at(-offset)]; 3321 } 3322 3323 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3324 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3325 } 3326 3327 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3328 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3329 } 3330 3331 // only used for value types 3332 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3333 int offset) { 3334 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3335 } 3336 3337 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3338 int offset) { 3339 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3340 } 3341 3342 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3343 int offset) { 3344 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3345 } 3346 3347 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3348 int offset) { 3349 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3350 } 3351 3352 // needs to be platform dep for the 32 bit platforms. 
3353 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3354 int offset) { 3355 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3356 } 3357 3358 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3359 address addr, int offset) { 3360 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3361 ((VMJavaVal64*)addr)->d); 3362 } 3363 3364 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3365 int offset) { 3366 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3367 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3368 } 3369 3370 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3371 address addr, int offset) { 3372 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3373 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3374 ((VMJavaVal64*)addr)->l; 3375 } 3376 3377 // Locals 3378 3379 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3380 return (address)locals[Interpreter::local_index_at(-offset)]; 3381 } 3382 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3383 return (jint)locals[Interpreter::local_index_at(-offset)]; 3384 } 3385 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3386 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3387 } 3388 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3389 return (oop)locals[Interpreter::local_index_at(-offset)]; 3390 } 3391 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3392 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3393 } 3394 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3395 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l; 3396 } 3397 3398 // Returns the address of locals value. 
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
// 64-bit locals are addressed at -(offset+1): the value spans two slots.
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

// Copy one expression-stack slot into a local (implements astore/istore-style moves).
void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

// dup/dup2/dup_x*/dup2_x* implement the corresponding JVM stack-shuffle
// bytecodes as sequences of slot copies. The copy ORDER is significant:
// each sequence reads a slot before it is overwritten.
void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

// Translate an interpreter message enum value to its name, for debug printing.
const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}
// Dump the interpreter state struct field-by-field (debug aid).
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Callable from a debugger: print the interpreter state at the given address.
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // JVMTI
#endif // CC_INTERP