/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/templateTable.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Saves the thread's do_not_unlock_if_synchronized flag, clears it for the
// duration of the scope, and restores the saved value on destruction (RAII).
class UnlockFlagSaver {
 private:
  JavaThread* _thread;
  bool _do_not_unlock;
 public:
  UnlockFlagSaver(JavaThread* t) {
    _thread = t;
    _do_not_unlock = t->do_not_unlock_if_synchronized();
    t->set_do_not_unlock_if_synchronized(false);
  }
  ~UnlockFlagSaver() {
    _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
  }
};

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
  LastFrameAccessor last_frame(thread);
  last_frame.set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != NULL.
    MethodData* mdo = last_frame.method()->method_data();
    if (mdo != NULL) {
      NEEDS_CLEANUP;
      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants


IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
  // access constant pool
  LastFrameAccessor last_frame(thread);
  ConstantPool* pool = last_frame.method()->constants();
  int index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
  constantTag tag = pool->tag_at(index);

  assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
  Klass* klass = pool->klass_at(index, CHECK);
  oop java_class = klass->java_mirror();
  thread->set_vm_result(java_class);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
  assert(bytecode == Bytecodes::_fast_aldc ||
         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
  ResourceMark rm(thread);
  LastFrameAccessor last_frame(thread);
  methodHandle m (thread, last_frame.method());
  Bytecode_loadconstant ldc(m, last_frame.bci());
  oop result = ldc.resolve_constant(CHECK);
#ifdef ASSERT
  {
    // The bytecode wrappers aren't GC-safe so construct a new one
    Bytecode_loadconstant ldc2(m, last_frame.bci());
    oop coop = m->constants()->resolved_references()->obj_at(ldc2.cache_index());
    assert(result == coop, "expected result for assembly code");
  }
#endif
  thread->set_vm_result(result);
}
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Allocation

IRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, ConstantPool* pool, int index))
  Klass* k = pool->klass_at(index, CHECK);
  InstanceKlass* klass = InstanceKlass::cast(k);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalizer is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalizer we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size))
  Klass* klass = pool->klass_at(index, CHECK);
  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  LastFrameAccessor last_frame(thread);
  ConstantPool* constants = last_frame.method()->constants();
  int i = last_frame.get_index_u2(Bytecodes::_multianewarray);
  Klass* klass = constants->klass_at(i, CHECK);
  int nof_dims = last_frame.number_of_dimensions();
  assert(klass->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
  ResourceMark rm(thread);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
IRT_END


// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
  // Force resolving; quicken the bytecode
  LastFrameAccessor last_frame(thread);
  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
  ConstantPool* cpool = last_frame.method()->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquick'd bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  Klass* klass = cpool->klass_at(which, CHECK);
  thread->set_vm_result_2(klass);
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason,
                                         const methodHandle& trap_method, int trap_bci, TRAPS) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == NULL) {
      Method::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())),
               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
      // and fall through...
    }
    if (trap_mdo != NULL) {
      // Update per-method count of trap events. The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
  assert(ProfileTraps, "call me only if profiling");
  LastFrameAccessor last_frame(thread);
  methodHandle trap_method(thread, last_frame.method());
  int trap_bci = trap_method->bci_from(last_frame.bcp());
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
}

#ifdef CC_INTERP
// As legacy note_trap, but we have more arguments.
IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
  methodHandle trap_method(method);
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
IRT_END

// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper
// for each exception.
void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); }
void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); }
void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); }
void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); }
void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); }
#endif // CC_INTERP


static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance. We do not call its
// constructor for the same reason (it is empty, anyway).
IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  java_lang_Throwable::set_message(exception(),
          Universe::delayed_stack_overflow_error_message());
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(thread, Deoptimization::Reason_div0_check, CHECK);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(thread, Deoptimization::Reason_null_check, CHECK);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(thread, s, message);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(thread, s, klass_name);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index))
  char message[jintAsStringSize];
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_range_check, CHECK);
  }
  // create exception
  sprintf(message, "%d", index);
  THROW_MSG(s, message);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* thread, oopDesc* obj))

  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, obj->klass());

  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
IRT_END

// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))

  LastFrameAccessor last_frame(thread);
  Handle             h_exception(thread, exception);
  methodHandle       h_method   (thread, last_frame.method());
  constantPoolHandle h_constants(thread, h_method->constants());
  bool               should_repeat;
  int                handler_bci;
  int                current_bci = last_frame.bci();

  if (thread->frames_to_pop_failed_realloc() > 0) {
    // Allocation of scalar replaced object used in this frame
    // failed. Unconditionally pop the frame.
    thread->dec_frames_to_pop_failed_realloc();
    thread->set_vm_result(h_exception());
    // If the method is synchronized we already unlocked the monitor
    // during deoptimization so the interpreter needs to skip it when
    // the frame is popped.
    thread->set_do_not_unlock_if_synchronized(true);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (thread->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    thread->set_vm_result(exception);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  do {
    should_repeat = false;

    // assertions
#ifdef ASSERT
    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
    // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
    if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
      if (ExitVMOnVerifyError) vm_exit(-1);
      ShouldNotReachHere();
    }
#endif

    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm(thread);
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
                   " at bci %d for thread " INTPTR_FORMAT,
                   h_method->print_value_string(), current_bci, p2i(thread));
      Exceptions::log_exception(h_exception, tempst);
    }
    // Don't go paging in something which won't be used.
    //     else if (extable->length() == 0) {
    //       // disabled for now - interpreter is not using shortcut yet
    //       // (shortcut is not to call runtime if we have no exception handlers)
    //       // warning("performance bug: should not call runtime if method has no exception handlers");
    //     }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(h_exception);

    // exception handler lookup
    Klass* klass = h_exception->klass();
    handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

#if INCLUDE_JVMCI
  if (EnableJVMCI && h_method->method_data() != NULL) {
    ResourceMark rm(thread);
    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL);
    if (pdata != NULL && pdata->is_BitData()) {
      BitData* bit_data = (BitData*) pdata;
      bit_data->set_exception_seen();
    }
  }
#endif

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::post_exception_throw(thread, h_method(), last_frame.bcp(), h_exception());
  }

#ifdef CC_INTERP
  address continuation = (address)(intptr_t) handler_bci;
#else
  address continuation = NULL;
#endif
  address handler_pc = NULL;
  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
#ifndef CC_INTERP
    continuation = Interpreter::remove_activation_entry();
#endif
#if COMPILER2_OR_JVMCI
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment(THREAD);
#endif
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
#ifndef CC_INTERP
    set_bcp_and_mdp(handler_pc, thread);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#endif
  }
  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
  }

  thread->set_vm_result(h_exception());
  return continuation;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
  assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
  THROW(vmSymbols::java_lang_AbstractMethodError());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Fields
//

void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  // resolve field
  fieldDescriptor info;
  LastFrameAccessor last_frame(thread);
  constantPoolHandle pool(thread, last_frame.method()->constants());
  methodHandle m(thread, last_frame.method());
  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_nofast_putfield ||
                    bytecode == Bytecodes::_putstatic);
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
                                       m, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // Resolution of put instructions on final fields is delayed. That is required so that
  // exceptions are thrown at the correct place (when the instruction is actually invoked).
  // If we do not resolve an instruction in the current pass, leaving the put_code
  // set to zero will cause the next put instruction to the same field to reresolve.

  // Resolution of put instructions to final instance fields with invalid updates (i.e.,
  // to final instance fields with updates originating from a method different than <init>)
  // is inhibited. A putfield instruction targeting an instance final field must throw
  // an IllegalAccessError if the instruction is not in an instance
  // initializer method <init>. If resolution were not inhibited, a putfield
  // in an initializer method could be resolved in the initializer. Subsequent
  // putfield instructions to the same field would then use cached information.
  // As a result, those instructions would not pass through the VM. That is,
  // checks in resolve_field_access() would not be executed for those instructions
  // and the required IllegalAccessError would not be thrown.
  //
  // Also, we need to delay resolving getstatic and putstatic instructions until the
  // class is initialized. This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
  bool uninitialized_static = is_static && !klass->is_initialized();
  bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
                                      info.has_initialized_final_update();
  assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");

  Bytecodes::Code get_code = (Bytecodes::Code)0;
  Bytecodes::Code put_code = (Bytecodes::Code)0;
  if (!uninitialized_static) {
    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
    if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  cp_cache_entry->set_field(
    get_code,
    put_code,
    info.field_holder(),
    info.index(),
    info.offset(),
    state,
    info.access_flags().is_final(),
    info.access_flags().is_volatile(),
    pool->pool_holder()
  );
}


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
  }
  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
         "must be NULL or an object");
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (elem == NULL || h_obj()->is_unlocked()) {
    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
  }
  ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
  // Free entry. This must be done here, since a pending exception might be installed on
  // exit. If it is not cleared, the exception handling code will try to unlock the monitor again.
  elem->set_obj(NULL);
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread))
  // Returns an illegal exception to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any current installed exception will be overwritten. This
  // method will be called during an exception unwind.

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(thread, thread->vm_result());
  assert(exception() != NULL, "vm result should be set");
  thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
  if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
    exception = get_preinitialized_exception(
                       SystemDictionary::IllegalMonitorStateException_klass(),
                       CATCH);
  }
  thread->set_vm_result(exception());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Invokes

IRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, Method* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
IRT_END

IRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, Method* method, address bcp))
  JvmtiExport::post_raw_breakpoint(thread, method, bcp);
IRT_END

void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(thread, NULL);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface ||
      bytecode == Bytecodes::_invokespecial) {
    ResourceMark rm(thread);
    methodHandle m (thread, last_frame.method());
    Bytecode_invoke call(m, last_frame.bci());
    Symbol* signature = call.signature();
    receiver = Handle(thread, last_frame.callee_receiver(signature));

    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           !Universe::heap()->is_in_reserved(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (info.resolved_method()->is_old()) {
        // It is very unlikely that a method is redefined more than 100 times
        // in the middle of resolve. If it loops here more than 100 times,
        // there could be a bug.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method is redefined in the middle of resolve so re-try.
        LinkResolver::resolve_invoke(info, receiver, pool,
                                     last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                     CHECK);
      }
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

#ifdef ASSERT
  if (bytecode == Bytecodes::_invokeinterface) {
    if (info.resolved_method()->method_holder() ==
                                            SystemDictionary::Object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also CallInfo::set_interface for details)
      assert(info.call_kind() == CallInfo::vtable_call ||
             info.call_kind() == CallInfo::direct_call, "");
      methodHandle rm = info.resolved_method();
      assert(rm->is_final() || info.has_vtable_index(),
             "should have been set already");
    } else if (!info.resolved_method()->has_itable_index()) {
      // Resolved something like CharSequence.toString. Use vtable not itable.
      assert(info.call_kind() != CallInfo::itable_call, "");
    } else {
      // Setup itable entry
      assert(info.call_kind() == CallInfo::itable_call, "");
      int index = info.resolved_method()->itable_index();
      assert(info.itable_index() == index, "");
    }
  } else if (bytecode == Bytecodes::_invokespecial) {
    assert(info.call_kind() == CallInfo::direct_call, "must be direct call");
  } else {
    assert(info.call_kind() == CallInfo::direct_call ||
           info.call_kind() == CallInfo::vtable_call, "");
  }
#endif
  // Get sender or sender's host_klass, and only set cpCache entry to resolved if
  // it is not an interface. The receiver for invokespecial calls within interface
  // methods must be checked for every call.
  InstanceKlass* sender = pool->pool_holder();
  sender = sender->is_anonymous() ? sender->host_klass() : sender;

  switch (info.call_kind()) {
  case CallInfo::direct_call:
    cp_cache_entry->set_direct_call(
      bytecode,
      info.resolved_method(),
      sender->is_interface());
    break;
  case CallInfo::vtable_call:
    cp_cache_entry->set_vtable_call(
      bytecode,
      info.resolved_method(),
      info.vtable_index());
    break;
  case CallInfo::itable_call:
    cp_cache_entry->set_itable_call(
      bytecode,
      info.resolved_method(),
      info.itable_index());
    break;
  default: ShouldNotReachHere();
  }
}


// First time execution: Resolve symbols, create a permanent MethodType object.
void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) {
  Thread* THREAD = thread;
  const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
  LastFrameAccessor last_frame(thread);

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  cp_cache_entry->set_method_handle(pool, info);
}

// First time execution: Resolve symbols, create a permanent CallSite object.
void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;

  //TO DO: consider passing BCI to Java.
  // int caller_bci = last_frame.method()->bci_from(last_frame.bcp());

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  int index = last_frame.get_index_u4(bytecode);
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 index, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index);
  cp_cache_entry->set_dynamic_call(pool, info);
}

// This function is the interface to the assembly code. It returns the resolved
// cpCache entry. This doesn't safepoint, but the helper routines safepoint.
// This function will check for redefinition!
IRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode)) {
  switch (bytecode) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    resolve_get_put(thread, bytecode);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    resolve_invoke(thread, bytecode);
    break;
  case Bytecodes::_invokehandle:
    resolve_invokehandle(thread);
    break;
  case Bytecodes::_invokedynamic:
    resolve_invokedynamic(thread);
    break;
  default:
    fatal("unexpected bytecode: %s", Bytecodes::name(bytecode));
    break;
  }
}
IRT_END

//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
  if (branch_bcp != NULL && nm != NULL) {
    // This was a successful request for an OSR nmethod. Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again. It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    LastFrameAccessor last_frame(thread);
    Method* method = last_frame.method();
    int bci = method->bci_from(last_frame.bcp());
    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != NULL) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}

IRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci        = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;
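  // branch_bcp is NULL when the invocation counter overflows at method entry and
  // non-NULL when a backedge counter overflows at a branch; only the latter case
  // can produce an OSR nmethod (see the assert in frequency_counter_overflow above).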

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
    // monitors in the activation are biased because their
    // BasicObjectLocks will need to migrate during OSR. Force
    // unbiasing of all monitors in the activation now (even though
    // the OSR nmethod might be invalidated) because we don't have a
    // safepoint opportunity later once the migration begins.
    if (UseBiasedLocking) {
      ResourceMark rm;
      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
      for( BasicObjectLock *kptr = last_frame.monitor_end();
           kptr < last_frame.monitor_begin();
           kptr = last_frame.next_monitor(kptr) ) {
        if( kptr->obj() != NULL ) {
          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
        }
      }
      BiasedLocking::revoke(objects_to_revoke);
    }
  }
  return osr_nm;
IRT_END

IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == NULL) return 0;
  return mdo->bci_to_di(bci);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  assert(ProfileInterpreter, "must be profiling interpreter");
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  Method::build_interpreter_method_data(method, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
    // and fall through...
  }
IRT_END


#ifdef ASSERT
IRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    ResetNoHandleMark rnm; // In a LEAF entry.
    HandleMark hm;
    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
IRT_END
#endif // ASSERT

IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(thread);
  HandleMark hm(thread);
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement. This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != NULL, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
IRT_END

IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m))
  MethodCounters* mcs = Method::build_method_counters(m, thread);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
  }
  return mcs;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // IRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    LastFrameAccessor last_frame(thread);
    JvmtiExport::at_single_stepping_point(thread, last_frame.method(), last_frame.bcp());
  }
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass

  InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass());
  int index = cp_entry->field_index();
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  bool is_static = (obj == NULL);
  HandleMark hm(thread);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }
  InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_field_access(thread, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
                                                            oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

  Klass* k = cp_entry->f1_as_klass();

  // check the access_flags for the field in the klass
  InstanceKlass* ik = InstanceKlass::cast(k);
  int index = cp_entry->field_index();
  // bail out if field modifications are not watched
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
  case btos: sig_type = 'B'; break;
  case ztos: sig_type = 'Z'; break;
  case ctos: sig_type = 'C'; break;
  case stos: sig_type = 'S'; break;
  case itos: sig_type = 'I'; break;
  case ftos: sig_type = 'F'; break;
  case atos: sig_type = 'L'; break;
  case ltos: sig_type = 'J'; break;
  case dtos: sig_type = 'D'; break;
  default:  ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);

  HandleMark hm(thread);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks. We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }

  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_raw_field_modification(thread, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_entry(thread, last_frame.method(), last_frame.get_frame());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_exit(thread, last_frame.method(), last_frame.get_frame());
IRT_END

IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
IRT_END


// Implementation of SignatureHandlerLibrary

#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
// Dummy definition (else normalization method is defined in CPU
// dependent code)
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
  return fingerprint;
}
#endif

address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == NULL) {
    return NULL;
  }
  address handler = handler_blob->code_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != NULL) {
    return;
  }
  if (set_handler_blob() == NULL) {
    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->code_begin();

  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler = _handler;
  int insts_size = buffer->pure_insts_size();
  if (handler + insts_size > _handler_blob->code_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != NULL) {
    memcpy(handler, buffer->insts_begin(), insts_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, insts_size);
    _handler = handler + insts_size;
  }
  return handler;
}

void SignatureHandlerLibrary::add(const methodHandle& method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
      // lookup method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      // allow CPU-dependent code to optimize the fingerprints for the fast handler
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler (without memorizing it in the fingerprints)
        } else {
          // debugging support
          if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
            ttyLocker ttyl;
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.insts_size());
            if (buffer.insts_size() > 0) {
              Disassembler::decode(handler, handler + buffer.insts_size());
            }
#ifndef PRODUCT
            address rh_begin = Interpreter::result_handler(method()->result_type());
            if (CodeCache::contains(rh_begin)) {
              // else it might be special platform dependent values
              tty->print_cr(" --- associated result handler ---");
              address rh_end = rh_begin;
              while (*(int*)rh_end != 0) {
                rh_end += sizeof(int);
              }
              Disassembler::decode(rh_begin, rh_end);
            } else {
              tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin));
            }
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
      // Set handler under SignatureHandlerLibrary_lock
      if (handler_index < 0) {
        // use generic signature handler
        method->set_signature_handler(Interpreter::slow_signature_handler());
      } else {
        // set handler
        method->set_signature_handler(_handlers->at(handler_index));
      }
    } else {
      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    }
  }
#ifdef ASSERT
  int handler_index = -1;
  int fingerprint_index = -2;
  {
    // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized
    // in any way if accessed from multiple threads. To avoid races with another
    // thread which may change the arrays in the above, mutex protected block, we
    // have to protect this read access here with the same mutex as well!
    MutexLocker mu(SignatureHandlerLibrary_lock);
    if (_handlers != NULL) {
      handler_index = _handlers->find(method->signature_handler());
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      fingerprint_index = _fingerprints->find(fingerprint);
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         handler_index == fingerprint_index, "sanity check");
#endif // ASSERT
}

void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
address                  SignatureHandlerLibrary::_handler      = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
address                  SignatureHandlerLibrary::_buffer       = NULL;


IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  bool in_base_library;
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, in_base_library, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
IRT_END

#if defined(IA32) || defined(AMD64) || defined(ARM)
IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResetNoHandleMark rnm; // In a LEAF entry.
  HandleMark hm;
  ResourceMark rm;
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(thread, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
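  // asc.size() is measured in interpreter stack slots (longs/doubles occupy two),
  // which matches the Interpreter::stackElementSize scaling used below.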
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
IRT_END
#endif

#if INCLUDE_JVMTI
// This is support for the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
  Symbol* mname = cpool->name_ref_at(cp_index);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = (oop) member_name;
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    thread->set_vm_result(member_name_oop);
  } else {
    thread->set_vm_result(NULL);
  }
IRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be an IRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
IRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(thread, last_frame.method());
  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
IRT_END
#endif // !PRODUCT