/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/templateTable.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

class UnlockFlagSaver {
  private:
    JavaThread* _thread;
    bool _do_not_unlock;
  public:
    UnlockFlagSaver(JavaThread* t) {
      _thread = t;
      _do_not_unlock = t->do_not_unlock_if_synchronized();
      t->set_do_not_unlock_if_synchronized(false);
    }
    ~UnlockFlagSaver() {
      _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
    }
};

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
  LastFrameAccessor last_frame(thread);
  last_frame.set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != NULL.
    MethodData* mdo = last_frame.method()->method_data();
    if (mdo != NULL) {
      NEEDS_CLEANUP;
      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants


IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
  // access constant pool
  LastFrameAccessor last_frame(thread);
  ConstantPool* pool = last_frame.method()->constants();
  int index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
  constantTag tag = pool->tag_at(index);

  assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
  Klass* klass = pool->klass_at(index, CHECK);
  oop java_class = klass->java_mirror();
  thread->set_vm_result(java_class);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
  assert(bytecode == Bytecodes::_ldc ||
         bytecode == Bytecodes::_ldc_w ||
         bytecode == Bytecodes::_ldc2_w ||
         bytecode == Bytecodes::_fast_aldc ||
         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
  ResourceMark rm(thread);
  const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
                             bytecode == Bytecodes::_fast_aldc_w);
  LastFrameAccessor last_frame(thread);
  methodHandle m (thread, last_frame.method());
  Bytecode_loadconstant ldc(m, last_frame.bci());

  // Double-check the size. (Condy can have any type.)
  BasicType type = ldc.result_type();
  switch (type2size[type]) {
  case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
  case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
  default: ShouldNotReachHere();
  }

  // Resolve the constant. This does not do unboxing.
  // But it does replace Universe::the_null_sentinel by null.
  oop result = ldc.resolve_constant(CHECK);
  assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc");

#ifdef ASSERT
  {
    // The bytecode wrappers aren't GC-safe so construct a new one
    Bytecode_loadconstant ldc2(m, last_frame.bci());
    int rindex = ldc2.cache_index();
    if (rindex < 0)
      rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
    if (rindex >= 0) {
      oop coop = m->constants()->resolved_references()->obj_at(rindex);
      oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
      assert(roop == coop, "expected result for assembly code");
    }
  }
#endif
  thread->set_vm_result(result);
  if (!is_fast_aldc) {
    // Tell the interpreter how to unbox the primitive.
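    // The flags word packs the result's tos state (shifted by
    // ConstantPoolCacheEntry::tos_state_shift) together with the box
    // object's value-field offset (masked by field_index_mask).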
    guarantee(java_lang_boxing_object::is_instance(result, type), "");
    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
    intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
                      | (offset & ConstantPoolCacheEntry::field_index_mask));
    thread->set_vm_result_2((Metadata*)flags);
  }
}
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Allocation

IRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, ConstantPool* pool, int index))
  Klass* k = pool->klass_at(index, CHECK);
  InstanceKlass* klass = InstanceKlass::cast(k);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalized is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalized we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size))
  Klass* klass = pool->klass_at(index, CHECK);
  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  LastFrameAccessor last_frame(thread);
  ConstantPool* constants = last_frame.method()->constants();
  int i = last_frame.get_index_u2(Bytecodes::_multianewarray);
  Klass* klass = constants->klass_at(i, CHECK);
  int nof_dims = last_frame.number_of_dimensions();
  assert(klass->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
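  // A small on-stack buffer covers the common case; larger dimension
  // counts fall back to a resource-allocated array below.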
  ResourceMark rm(thread);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
IRT_END


// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
  // Force resolving; quicken the bytecode
  LastFrameAccessor last_frame(thread);
  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
  ConstantPool* cpool = last_frame.method()->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquick'd bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  Klass* klass = cpool->klass_at(which, CHECK);
  thread->set_vm_result_2(klass);
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason,
                                         const methodHandle& trap_method, int trap_bci, TRAPS) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == NULL) {
      Method::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())),
               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
      // and fall through...
    }
    if (trap_mdo != NULL) {
      // Update per-method count of trap events. The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
  assert(ProfileTraps, "call me only if profiling");
  LastFrameAccessor last_frame(thread);
  methodHandle trap_method(thread, last_frame.method());
  int trap_bci = trap_method->bci_from(last_frame.bcp());
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
}

#ifdef CC_INTERP
// As legacy note_trap, but we have more arguments.
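// Unlike the variant above, the method and bci are passed in explicitly
// instead of being read from the thread's last interpreter frame.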
IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
  methodHandle trap_method(method);
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
IRT_END

// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper
// for each exception.
void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); }
void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); }
void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); }
void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); }
void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); }
#endif // CC_INTERP


static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance. We do not call its
// constructor for the same reason (it is empty, anyway).
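// The delayed variant below additionally attaches the message prepared in
// Universe::delayed_stack_overflow_error_message().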
IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  java_lang_Throwable::set_message(exception(),
          Universe::delayed_stack_overflow_error_message());
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(thread, Deoptimization::Reason_div0_check, CHECK);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(thread, Deoptimization::Reason_null_check, CHECK);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(thread, s, message);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(thread, s, klass_name);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index))
  char message[jintAsStringSize];
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_range_check, CHECK);
  }
  // create exception
  sprintf(message, "%d", index);
  THROW_MSG(s, message);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* thread, oopDesc* obj))

  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, obj->klass());

  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
IRT_END

// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
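// Note: the handler lookup may itself throw (e.g. while loading the class
// named in a catch clause); in that case the search is restarted with the
// new exception (see the retry loop around fast_exception_handler_bci_for).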
IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))

  LastFrameAccessor last_frame(thread);
  Handle             h_exception(thread, exception);
  methodHandle       h_method   (thread, last_frame.method());
  constantPoolHandle h_constants(thread, h_method->constants());
  bool               should_repeat;
  int                handler_bci;
  int                current_bci = last_frame.bci();

  if (thread->frames_to_pop_failed_realloc() > 0) {
    // Allocation of scalar replaced object used in this frame
    // failed. Unconditionally pop the frame.
    thread->dec_frames_to_pop_failed_realloc();
    thread->set_vm_result(h_exception());
    // If the method is synchronized we already unlocked the monitor
    // during deoptimization so the interpreter needs to skip it when
    // the frame is popped.
    thread->set_do_not_unlock_if_synchronized(true);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into Java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (thread->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    thread->set_vm_result(exception);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  do {
    should_repeat = false;

    // assertions
#ifdef ASSERT
    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
    // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
    if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
      if (ExitVMOnVerifyError) vm_exit(-1);
      ShouldNotReachHere();
    }
#endif

    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm(thread);
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
                   " at bci %d for thread " INTPTR_FORMAT,
                   h_method->print_value_string(), current_bci, p2i(thread));
      Exceptions::log_exception(h_exception, tempst);
    }
    // Don't go paging in something which won't be used.
    //     else if (extable->length() == 0) {
    //       // disabled for now - interpreter is not using shortcut yet
    //       // (shortcut is not to call runtime if we have no exception handlers)
    //       // warning("performance bug: should not call runtime if method has no exception handlers");
    //     }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(h_exception);

    // exception handler lookup
    Klass* klass = h_exception->klass();
    handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

#if INCLUDE_JVMCI
  if (EnableJVMCI && h_method->method_data() != NULL) {
    ResourceMark rm(thread);
    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL);
    if (pdata != NULL && pdata->is_BitData()) {
      BitData* bit_data = (BitData*) pdata;
      bit_data->set_exception_seen();
    }
  }
#endif

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::post_exception_throw(thread, h_method(), last_frame.bcp(), h_exception());
  }

#ifdef CC_INTERP
  address continuation = (address)(intptr_t) handler_bci;
#else
  address continuation = NULL;
#endif
  address handler_pc = NULL;
  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
#ifndef CC_INTERP
    continuation = Interpreter::remove_activation_entry();
#endif
#if COMPILER2_OR_JVMCI
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment(THREAD);
#endif
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
#ifndef CC_INTERP
    set_bcp_and_mdp(handler_pc, thread);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#endif
  }
  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
  }

  thread->set_vm_result(h_exception());
  return continuation;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
  assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
  THROW(vmSymbols::java_lang_AbstractMethodError());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Fields
//

void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  // resolve field
  fieldDescriptor info;
  LastFrameAccessor last_frame(thread);
  constantPoolHandle pool(thread, last_frame.method()->constants());
  methodHandle m(thread, last_frame.method());
  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_nofast_putfield ||
                    bytecode == Bytecodes::_putstatic);
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
                                       m, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // Resolution of put instructions on final fields is delayed. That is required so that
  // exceptions are thrown at the correct place (when the instruction is actually invoked).
  // If we do not resolve an instruction in the current pass, leaving the put_code
  // set to zero will cause the next put instruction to the same field to reresolve.

  // Resolution of put instructions to final instance fields with invalid updates (i.e.,
  // to final instance fields with updates originating from a method different than <init>)
  // is inhibited. A putfield instruction targeting an instance final field must throw
  // an IllegalAccessError if the instruction is not in an instance
  // initializer method <init>. If resolution were not inhibited, a putfield
  // in an initializer method could be resolved in the initializer. Subsequent
  // putfield instructions to the same field would then use cached information.
  // As a result, those instructions would not pass through the VM. That is,
  // checks in resolve_field_access() would not be executed for those instructions
  // and the required IllegalAccessError would not be thrown.
  //
  // Also, we need to delay resolving getstatic and putstatic instructions until the
  // class is initialized. This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
  bool uninitialized_static = is_static && !klass->is_initialized();
  bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
                                      info.has_initialized_final_update();
  assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");

  Bytecodes::Code get_code = (Bytecodes::Code)0;
  Bytecodes::Code put_code = (Bytecodes::Code)0;
  if (!uninitialized_static) {
    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
    if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  cp_cache_entry->set_field(
    get_code,
    put_code,
    info.field_holder(),
    info.index(),
    info.offset(),
    state,
    info.access_flags().is_final(),
    info.access_flags().is_volatile(),
    pool->pool_holder()
  );
}


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
  }
  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
         "must be NULL or an object");
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (elem == NULL || h_obj()->is_unlocked()) {
    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
  }
  ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
  // Free entry. This must be done here, since a pending exception might be installed on
  // exit. If it is not cleared, the exception handling code will try to unlock the monitor again.
  elem->set_obj(NULL);
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread))
  // Returns an illegal exception to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any current installed exception will be overwritten. This
  // method will be called during an exception unwind.
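  // The incoming exception (passed in via vm_result) is kept only if it is
  // a ThreadDeath; anything else is replaced by the preallocated
  // IllegalMonitorStateException below.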

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(thread, thread->vm_result());
  assert(exception() != NULL, "vm result should be set");
  thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
  if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
    exception = get_preinitialized_exception(
                       SystemDictionary::IllegalMonitorStateException_klass(),
                       CATCH);
  }
  thread->set_vm_result(exception());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Invokes

IRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, Method* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
IRT_END

IRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, Method* method, address bcp))
  JvmtiExport::post_raw_breakpoint(thread, method, bcp);
IRT_END

void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(thread, NULL);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface ||
      bytecode == Bytecodes::_invokespecial) {
    ResourceMark rm(thread);
    methodHandle m (thread, last_frame.method());
    Bytecode_invoke call(m, last_frame.bci());
    Symbol* signature = call.signature();
    receiver = Handle(thread, last_frame.callee_receiver(signature));

    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           !Universe::heap()->is_in_reserved(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (info.resolved_method()->is_old()) {
        // It is very unlikely that a method is redefined more than 100 times
        // in the middle of resolution. If we loop here more than 100 times,
        // there is probably a bug.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method is redefined in the middle of resolve so re-try.
        LinkResolver::resolve_invoke(info, receiver, pool,
                                     last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                     CHECK);
      }
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

#ifdef ASSERT
  if (bytecode == Bytecodes::_invokeinterface) {
    if (info.resolved_method()->method_holder() ==
                                            SystemDictionary::Object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also CallInfo::set_interface for details)
      assert(info.call_kind() == CallInfo::vtable_call ||
             info.call_kind() == CallInfo::direct_call, "");
      methodHandle rm = info.resolved_method();
      assert(rm->is_final() || info.has_vtable_index(),
             "should have been set already");
    } else if (!info.resolved_method()->has_itable_index()) {
      // Resolved something like CharSequence.toString. Use vtable not itable.
      assert(info.call_kind() != CallInfo::itable_call, "");
    } else {
      // Setup itable entry
      assert(info.call_kind() == CallInfo::itable_call, "");
      int index = info.resolved_method()->itable_index();
      assert(info.itable_index() == index, "");
    }
  } else if (bytecode == Bytecodes::_invokespecial) {
    assert(info.call_kind() == CallInfo::direct_call, "must be direct call");
  } else {
    assert(info.call_kind() == CallInfo::direct_call ||
           info.call_kind() == CallInfo::vtable_call, "");
  }
#endif
  // Get sender or sender's host_klass, and only set cpCache entry to resolved if
  // it is not an interface. The receiver for invokespecial calls within interface
  // methods must be checked for every call.
  InstanceKlass* sender = pool->pool_holder();
  sender = sender->has_host_klass() ? sender->host_klass() : sender;

  switch (info.call_kind()) {
  case CallInfo::direct_call:
    cp_cache_entry->set_direct_call(
      bytecode,
      info.resolved_method(),
      sender->is_interface());
    break;
  case CallInfo::vtable_call:
    cp_cache_entry->set_vtable_call(
      bytecode,
      info.resolved_method(),
      info.vtable_index());
    break;
  case CallInfo::itable_call:
    cp_cache_entry->set_itable_call(
      bytecode,
      info.resolved_klass(),
      info.resolved_method(),
      info.itable_index());
    break;
  default: ShouldNotReachHere();
  }
}


// First time execution: Resolve symbols, create a permanent MethodType object.
void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) {
  Thread* THREAD = thread;
  const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
  LastFrameAccessor last_frame(thread);

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  cp_cache_entry->set_method_handle(pool, info);
}

// First time execution: Resolve symbols, create a permanent CallSite object.
void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;

  //TO DO: consider passing BCI to Java.
  // int caller_bci = last_frame.method()->bci_from(last_frame.bcp());

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  int index = last_frame.get_index_u4(bytecode);
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 index, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index);
  cp_cache_entry->set_dynamic_call(pool, info);
}

// This function is the interface to the assembly code. It returns the resolved
// cpCache entry. This doesn't safepoint, but the helper routines safepoint.
// This function will check for redefinition!
IRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode)) {
  switch (bytecode) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    resolve_get_put(thread, bytecode);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    resolve_invoke(thread, bytecode);
    break;
  case Bytecodes::_invokehandle:
    resolve_invokehandle(thread);
    break;
  case Bytecodes::_invokedynamic:
    resolve_invokedynamic(thread);
    break;
  default:
    fatal("unexpected bytecode: %s", Bytecodes::name(bytecode));
    break;
  }
}
IRT_END

//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
  if (branch_bcp != NULL && nm != NULL) {
    // This was a successful request for an OSR nmethod. Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again. It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    LastFrameAccessor last_frame(thread);
    Method* method = last_frame.method();
    int bci = method->bci_from(last_frame.bcp());
    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
  }
  if (nm != NULL && thread->is_interp_only_mode()) {
    // Normally we never get an nm if is_interp_only_mode() is true, because
    // policy()->event has a check for this and won't compile the method when
    // true. However, it's possible for is_interp_only_mode() to become true
    // during the compilation. We don't want to return the nm in that case
    // because we want to continue to execute interpreted.
    nm = NULL;
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != NULL) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}

IRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
    // monitors in the activation are biased because their
    // BasicObjectLocks will need to migrate during OSR. Force
    // unbiasing of all monitors in the activation now (even though
    // the OSR nmethod might be invalidated) because we don't have a
    // safepoint opportunity later once the migration begins.
    if (UseBiasedLocking) {
      ResourceMark rm;
      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
      for( BasicObjectLock *kptr = last_frame.monitor_end();
           kptr < last_frame.monitor_begin();
           kptr = last_frame.next_monitor(kptr) ) {
        if( kptr->obj() != NULL ) {
          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
        }
      }
      BiasedLocking::revoke(objects_to_revoke);
    }
  }
  return osr_nm;
IRT_END

IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == NULL) return 0;
  return mdo->bci_to_di(bci);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  assert(ProfileInterpreter, "must be profiling interpreter");
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  Method::build_interpreter_method_data(method, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
    // and fall through...
  }
IRT_END


#ifdef ASSERT
IRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    ResetNoHandleMark rnm; // In a LEAF entry.
    HandleMark hm;
    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
IRT_END
#endif // ASSERT

IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(thread);
  HandleMark hm(thread);
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement. This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != NULL, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
IRT_END

IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m))
  MethodCounters* mcs = Method::build_method_counters(m, thread);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
  }
  return mcs;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // IRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    LastFrameAccessor last_frame(thread);
    JvmtiExport::at_single_stepping_point(thread, last_frame.method(), last_frame.bcp());
  }
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass

  InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass());
  int index = cp_entry->field_index();
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  bool is_static = (obj == NULL);
  HandleMark hm(thread);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }
  InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_field_access(thread, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
                                                            oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

  Klass* k = cp_entry->f1_as_klass();

  // check the access_flags for the field in the klass
  InstanceKlass* ik = InstanceKlass::cast(k);
  int index = cp_entry->field_index();
  // bail out if field modifications are not watched
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
  case btos: sig_type = 'B'; break;
  case ztos: sig_type = 'Z'; break;
  case ctos: sig_type = 'C'; break;
  case stos: sig_type = 'S'; break;
  case itos: sig_type = 'I'; break;
  case ftos: sig_type = 'F'; break;
  case atos: sig_type = 'L'; break;
  case ltos: sig_type = 'J'; break;
  case dtos: sig_type = 'D'; break;
  default:  ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);

  HandleMark hm(thread);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks. We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
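  // jlong_accessor reassembles the value from two properly aligned 32-bit
  // reads, one interpreter stack slot apart.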
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }

  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_raw_field_modification(thread, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_entry(thread, last_frame.method(), last_frame.get_frame());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_exit(thread, last_frame.method(), last_frame.get_frame());
IRT_END

IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
IRT_END


// Implementation of SignatureHandlerLibrary

#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
// Dummy definition (else normalization method is defined in CPU
// dependent code)
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
  return fingerprint;
}
#endif

address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == NULL) {
    return NULL;
  }
  address handler = handler_blob->code_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != NULL) {
    return;
  }
  if (set_handler_blob() == NULL) {
    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->code_begin();

  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler = _handler;
  int insts_size = buffer->pure_insts_size();
  if (handler + insts_size > _handler_blob->code_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != NULL) {
    memcpy(handler, buffer->insts_begin(), insts_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, insts_size);
    _handler = handler + insts_size;
  }
  return handler;
}

void SignatureHandlerLibrary::add(const methodHandle& method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
      // lookup method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      // allow CPU dependent code to optimize the fingerprints for the fast handler
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler (without memorizing it in the fingerprints)
        } else {
          // debugging support
          if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
            ttyLocker ttyl;
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.insts_size());
            if (buffer.insts_size() > 0) {
              Disassembler::decode(handler, handler + buffer.insts_size());
            }
#ifndef PRODUCT
            address rh_begin = Interpreter::result_handler(method()->result_type());
            if (CodeCache::contains(rh_begin)) {
              // else it might be special platform dependent values
              tty->print_cr(" --- associated result handler ---");
              address rh_end = rh_begin;
              while (*(int*)rh_end != 0) {
                rh_end += sizeof(int);
              }
              Disassembler::decode(rh_begin, rh_end);
            } else {
              tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin));
            }
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
      // Set handler under SignatureHandlerLibrary_lock
      if (handler_index < 0) {
        // use generic signature handler
        method->set_signature_handler(Interpreter::slow_signature_handler());
      } else {
        // set handler
        method->set_signature_handler(_handlers->at(handler_index));
      }
    } else {
      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    }
  }
#ifdef ASSERT
  int handler_index = -1;
  int fingerprint_index = -2;
  {
    // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized
    // in any way if accessed from multiple threads. To avoid races with another
    // thread which may change the arrays in the above, mutex protected block, we
    // have to protect this read access here with the same mutex as well!
    MutexLocker mu(SignatureHandlerLibrary_lock);
    if (_handlers != NULL) {
      handler_index = _handlers->find(method->signature_handler());
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      fingerprint_index = _fingerprints->find(fingerprint);
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         handler_index == fingerprint_index, "sanity check");
#endif // ASSERT
}

void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
address                  SignatureHandlerLibrary::_handler      = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
address                  SignatureHandlerLibrary::_buffer       = NULL;


IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  bool in_base_library;
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, in_base_library, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
IRT_END

#if defined(IA32) || defined(AMD64) || defined(ARM)
IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResetNoHandleMark rnm; // In a LEAF entry.
  HandleMark hm;
  ResourceMark rm;
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(thread, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
IRT_END
#endif

#if INCLUDE_JVMTI
// This is support for the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
  Symbol* mname = cpool->name_ref_at(cp_index);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = (oop) member_name;
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    thread->set_vm_result(member_name_oop);
  } else {
    thread->set_vm_result(NULL);
  }
IRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be an IRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
IRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(thread, last_frame.method());
  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
IRT_END
#endif // !PRODUCT