/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/templateTable.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Saves and clears the thread's do-not-unlock-if-synchronized flag for the
// enclosing scope; the destructor restores the saved value.
class UnlockFlagSaver {
  private:
    JavaThread* _thread;
    bool _do_not_unlock;
  public:
    UnlockFlagSaver(JavaThread* t) {
      _thread = t;
      _do_not_unlock = t->do_not_unlock_if_synchronized();
      t->set_do_not_unlock_if_synchronized(false);
    }
    ~UnlockFlagSaver() {
      _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
    }
};

// Helper class to access current interpreter state
class LastFrameAccessor : public StackObj {
  frame _last_frame;
public:
  LastFrameAccessor(JavaThread* thread) {
    assert(thread == Thread::current(), "sanity");
    _last_frame = thread->last_frame();
  }
  bool is_interpreted_frame() const { return _last_frame.is_interpreted_frame(); }
  Method*   method() const          { return _last_frame.interpreter_frame_method(); }
  address   bcp() const             { return _last_frame.interpreter_frame_bcp(); }
  int       bci() const             { return _last_frame.interpreter_frame_bci(); }
  address   mdp() const             { return _last_frame.interpreter_frame_mdp(); }

  void      set_bcp(address bcp)    { _last_frame.interpreter_frame_set_bcp(bcp); }
  void      set_mdp(address dp)     { _last_frame.interpreter_frame_set_mdp(dp); }

  // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
  Bytecodes::Code code() const      { return Bytecodes::code_at(method(), bcp()); }

  Bytecode  bytecode() const        { return Bytecode(method(), bcp()); }
  int get_index_u1(Bytecodes::Code bc) const     { return bytecode().get_index_u1(bc); }
  int get_index_u2(Bytecodes::Code bc) const     { return bytecode().get_index_u2(bc); }
  int get_index_u2_cpcache(Bytecodes::Code bc) const
                                                 { return bytecode().get_index_u2_cpcache(bc); }
  int get_index_u4(Bytecodes::Code bc) const     { return bytecode().get_index_u4(bc); }
  int number_of_dimensions() const               { return bcp()[3]; }
  ConstantPoolCacheEntry* cache_entry_at(int i) const
                                                 { return method()->constants()->cache()->entry_at(i); }
  ConstantPoolCacheEntry* cache_entry() const    { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }

  oop callee_receiver(Symbol* signature) {
    return _last_frame.interpreter_callee_receiver(signature);
  }
  BasicObjectLock* monitor_begin() const {
    return _last_frame.interpreter_frame_monitor_begin();
  }
  BasicObjectLock* monitor_end() const {
    return _last_frame.interpreter_frame_monitor_end();
  }
  BasicObjectLock* next_monitor(BasicObjectLock* current) const {
    return _last_frame.next_monitor_in_interpreter_frame(current);
  }

  frame& get_frame()                             { return _last_frame; }
};


bool InterpreterRuntime::is_breakpoint(JavaThread *thread) {
  return Bytecodes::code_or_bp_at(LastFrameAccessor(thread).bcp()) == Bytecodes::_breakpoint;
}

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
  LastFrameAccessor last_frame(thread);
  last_frame.set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != NULL.
    MethodData* mdo = last_frame.method()->method_data();
    if (mdo != NULL) {
      NEEDS_CLEANUP;
      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants


IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
  // access constant pool
  LastFrameAccessor last_frame(thread);
  ConstantPool* pool = last_frame.method()->constants();
  int index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
  constantTag tag = pool->tag_at(index);

  assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
  Klass* klass = pool->klass_at(index, CHECK);
  oop java_class = klass->java_mirror();
  thread->set_vm_result(java_class);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
  assert(bytecode == Bytecodes::_ldc ||
         bytecode == Bytecodes::_ldc_w ||
         bytecode == Bytecodes::_ldc2_w ||
         bytecode == Bytecodes::_fast_aldc ||
         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
  ResourceMark rm(thread);
  const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
                             bytecode == Bytecodes::_fast_aldc_w);
  LastFrameAccessor last_frame(thread);
  methodHandle m (thread, last_frame.method());
  Bytecode_loadconstant ldc(m, last_frame.bci());

  // Double-check the size.  (Condy can have any type.)
  BasicType type = ldc.result_type();
  switch (type2size[type]) {
  case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
  case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
  default: ShouldNotReachHere();
  }

  // Resolve the constant.  This does not do unboxing.
  // But it does replace Universe::the_null_sentinel by null.
  oop result = ldc.resolve_constant(CHECK);
  assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc");

#ifdef ASSERT
  {
    // The bytecode wrappers aren't GC-safe so construct a new one
    Bytecode_loadconstant ldc2(m, last_frame.bci());
    int rindex = ldc2.cache_index();
    if (rindex < 0)
      rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
    if (rindex >= 0) {
      oop coop = m->constants()->resolved_references()->obj_at(rindex);
      oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
      assert(roop == coop, "expected result for assembly code");
    }
  }
#endif
  thread->set_vm_result(result);
  if (!is_fast_aldc) {
    // Tell the interpreter how to unbox the primitive.
    guarantee(java_lang_boxing_object::is_instance(result, type), "");
    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
    intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
                      | (offset & ConstantPoolCacheEntry::field_index_mask));
    thread->set_vm_result_2((Metadata*)flags);
  }
}
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Allocation

IRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, ConstantPool* pool, int index))
  Klass* k = pool->klass_at(index, CHECK);
  InstanceKlass* klass = InstanceKlass::cast(k);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalized is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalized we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size))
  Klass* klass = pool->klass_at(index, CHECK);
  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  LastFrameAccessor last_frame(thread);
  ConstantPool* constants = last_frame.method()->constants();
  int i = last_frame.get_index_u2(Bytecodes::_multianewarray);
  Klass* klass = constants->klass_at(i, CHECK);
  int nof_dims = last_frame.number_of_dimensions();
  assert(klass->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
  ResourceMark rm(thread);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  thread->set_vm_result(obj);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
IRT_END


// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
  // Force resolving; quicken the bytecode
  LastFrameAccessor last_frame(thread);
  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
  ConstantPool* cpool = last_frame.method()->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquick'd bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  Klass* klass = cpool->klass_at(which, CHECK);
  thread->set_vm_result_2(klass);
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason,
                                         const methodHandle& trap_method, int trap_bci, TRAPS) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == NULL) {
      Method::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())),
               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
      // and fall through...
    }
    if (trap_mdo != NULL) {
      // Update per-method count of trap events.  The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
  assert(ProfileTraps, "call me only if profiling");
  LastFrameAccessor last_frame(thread);
  methodHandle trap_method(thread, last_frame.method());
  int trap_bci = trap_method->bci_from(last_frame.bcp());
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
}

#ifdef CC_INTERP
// As legacy note_trap, but we have more arguments.
IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
  methodHandle trap_method(method);
  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
IRT_END

// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper
// for each exception.
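// (Each wrapper below simply forwards to note_trap with the corresponding
// Deoptimization::Reason_* constant.)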
void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); }
void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); }
void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); }
void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); }
void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci)
  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); }
#endif // CC_INTERP


static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance.  We do not call its
// constructor for the same reason (it is empty, anyway).
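// The delayed variant below additionally installs its detail message explicitly
// (java_lang_Throwable::set_message), since the constructor is never run.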
IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
  Handle exception = get_preinitialized_exception(
                                 SystemDictionary::StackOverflowError_klass(),
                                 CHECK);
  java_lang_Throwable::set_message(exception(),
          Universe::delayed_stack_overflow_error_message());
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(thread, Deoptimization::Reason_div0_check, CHECK);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(thread, Deoptimization::Reason_null_check, CHECK);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(thread, s, message);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(thread, s, klass_name);
  thread->set_vm_result(exception());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index))
  char message[jintAsStringSize];
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name, CHECK);
  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_range_check, CHECK);
  }
  // create exception
  sprintf(message, "%d", index);
  THROW_MSG(s, message);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* thread, oopDesc* obj))

  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, obj->klass());

  if (ProfileTraps) {
    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
IRT_END

// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))

  LastFrameAccessor last_frame(thread);
  Handle             h_exception(thread, exception);
  methodHandle       h_method   (thread, last_frame.method());
  constantPoolHandle h_constants(thread, h_method->constants());
  bool               should_repeat;
  int                handler_bci;
  int                current_bci = last_frame.bci();

  if (thread->frames_to_pop_failed_realloc() > 0) {
    // Allocation of scalar replaced object used in this frame
    // failed. Unconditionally pop the frame.
    thread->dec_frames_to_pop_failed_realloc();
    thread->set_vm_result(h_exception());
    // If the method is synchronized we already unlocked the monitor
    // during deoptimization so the interpreter needs to skip it when
    // the frame is popped.
    thread->set_do_not_unlock_if_synchronized(true);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (thread->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    thread->set_vm_result(exception);
#ifdef CC_INTERP
    return (address) -1;
#else
    return Interpreter::remove_activation_entry();
#endif
  }

  do {
    should_repeat = false;

    // assertions
#ifdef ASSERT
    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
    // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
    if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
      if (ExitVMOnVerifyError) vm_exit(-1);
      ShouldNotReachHere();
    }
#endif

    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm(thread);
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
                   " at bci %d for thread " INTPTR_FORMAT " (%s)",
                   h_method->print_value_string(), current_bci, p2i(thread), thread->name());
      Exceptions::log_exception(h_exception, tempst);
    }
// Don't go paging in something which won't be used.
//     else if (extable->length() == 0) {
//       // disabled for now - interpreter is not using shortcut yet
//       // (shortcut is not to call runtime if we have no exception handlers)
//       // warning("performance bug: should not call runtime if method has no exception handlers");
//     }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(h_exception);

    // exception handler lookup
    Klass* klass = h_exception->klass();
    handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

#if INCLUDE_JVMCI
  if (EnableJVMCI && h_method->method_data() != NULL) {
    ResourceMark rm(thread);
    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL);
    if (pdata != NULL && pdata->is_BitData()) {
      BitData* bit_data = (BitData*) pdata;
      bit_data->set_exception_seen();
    }
  }
#endif

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::post_exception_throw(thread, h_method(), last_frame.bcp(), h_exception());
  }

#ifdef CC_INTERP
  address continuation = (address)(intptr_t) handler_bci;
#else
  address continuation = NULL;
#endif
  address handler_pc = NULL;
  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
#ifndef CC_INTERP
    continuation = Interpreter::remove_activation_entry();
#endif
#if COMPILER2_OR_JVMCI
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment(THREAD);
#endif
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
#ifndef CC_INTERP
    set_bcp_and_mdp(handler_pc, thread);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#endif
  }
  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
  }

  thread->set_vm_result(h_exception());
  return continuation;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
  assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
  THROW(vmSymbols::java_lang_AbstractMethodError());
IRT_END

// This method is called from the "abstract_entry" of the interpreter.
// At that point, the arguments have already been removed from the stack
// and therefore we don't have the receiver object at our fingertips. (Though,
// on some platforms the receiver still resides in a register...). Thus,
// we have no choice but to print an error message that does not contain the
// receiver type.
IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorWithMethod(JavaThread* thread,
                                                                        Method* missingMethod))
  ResourceMark rm(thread);
  assert(missingMethod != NULL, "sanity");
  methodHandle m(thread, missingMethod);
  LinkResolver::throw_abstract_method_error(m, THREAD);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorVerbose(JavaThread* thread,
                                                                     Klass* recvKlass,
                                                                     Method* missingMethod))
  ResourceMark rm(thread);
  methodHandle mh = methodHandle(thread, missingMethod);
  LinkResolver::throw_abstract_method_error(mh, recvKlass, THREAD);
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
IRT_END

IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(JavaThread* thread,
                                                                              Klass* recvKlass,
                                                                              Klass* interfaceKlass))
  ResourceMark rm(thread);
  char buf[1000];
  buf[0] = '\0';
  jio_snprintf(buf, sizeof(buf),
               "Class %s does not implement the requested interface %s",
               recvKlass ? recvKlass->external_name() : "NULL",
               interfaceKlass ? interfaceKlass->external_name() : "NULL");
  THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
IRT_END

//------------------------------------------------------------------------------------------------------------------------
// Fields
//

void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  // resolve field
  fieldDescriptor info;
  LastFrameAccessor last_frame(thread);
  constantPoolHandle pool(thread, last_frame.method()->constants());
  methodHandle m(thread, last_frame.method());
  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_nofast_putfield ||
                    bytecode == Bytecodes::_putstatic);
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
                                       m, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // Resolution of put instructions on final fields is delayed. That is required so that
  // exceptions are thrown at the correct place (when the instruction is actually invoked).
  // If we do not resolve an instruction in the current pass, leaving the put_code
  // set to zero will cause the next put instruction to the same field to reresolve.

  // Resolution of put instructions to final instance fields with invalid updates (i.e.,
  // to final instance fields with updates originating from a method different than <init>)
  // is inhibited. A putfield instruction targeting an instance final field must throw
  // an IllegalAccessError if the instruction is not in an instance
  // initializer method <init>. If resolution were not inhibited, a putfield
  // in an initializer method could be resolved in the initializer. Subsequent
  // putfield instructions to the same field would then use cached information.
  // As a result, those instructions would not pass through the VM. That is,
  // checks in resolve_field_access() would not be executed for those instructions
  // and the required IllegalAccessError would not be thrown.
  //
  // Also, we need to delay resolving getstatic and putstatic instructions until the
  // class is initialized. This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
  bool uninitialized_static = is_static && !klass->is_initialized();
  bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
                                      info.has_initialized_final_update();
  assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");

  Bytecodes::Code get_code = (Bytecodes::Code)0;
  Bytecodes::Code put_code = (Bytecodes::Code)0;
  if (!uninitialized_static) {
    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
    if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  cp_cache_entry->set_field(
    get_code,
    put_code,
    info.field_holder(),
    info.index(),
    info.offset(),
    state,
    info.access_flags().is_final(),
    info.access_flags().is_volatile(),
    pool->pool_holder()
  );
}


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
  }
  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
         "must be NULL or an object");
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  Handle h_obj(thread, elem->obj());
  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
         "must be NULL or an object");
  if (elem == NULL || h_obj()->is_unlocked()) {
    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
  }
  ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
  // Free entry. This must be done here, since a pending exception might be installed on
  // exit.
  // If it is not cleared, the exception handling code will try to unlock the monitor again.
  elem->set_obj(NULL);
#ifdef ASSERT
  thread->last_frame().interpreter_frame_verify_monitor(elem);
#endif
IRT_END


IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread))
  // Returns an illegal exception to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any current installed exception will be overwritten. This
  // method will be called during an exception unwind.

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(thread, thread->vm_result());
  assert(exception() != NULL, "vm result should be set");
  thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
  if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
    exception = get_preinitialized_exception(
                       SystemDictionary::IllegalMonitorStateException_klass(),
                       CATCH);
  }
  thread->set_vm_result(exception());
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Invokes

IRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, Method* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
IRT_END

IRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, Method* method, address bcp))
  JvmtiExport::post_raw_breakpoint(thread, method, bcp);
IRT_END

void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(thread, NULL);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface ||
      bytecode == Bytecodes::_invokespecial) {
    ResourceMark rm(thread);
    methodHandle m (thread, last_frame.method());
    Bytecode_invoke call(m, last_frame.bci());
    Symbol* signature = call.signature();
    receiver = Handle(thread, last_frame.callee_receiver(signature));

    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           !Universe::heap()->is_in_reserved(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());

  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
      int retry_count = 0;
      while (info.resolved_method()->is_old()) {
        // It is very unlikely that a method is redefined more than 100 times
        // in the middle of resolve. If this loops more than 100 times, there
        // is probably a bug here.
        guarantee((retry_count++ < 100),
                  "Could not resolve to latest version of redefined method");
        // method is redefined in the middle of resolve so re-try.
        LinkResolver::resolve_invoke(info, receiver, pool,
                                     last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                     CHECK);
      }
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

#ifdef ASSERT
  if (bytecode == Bytecodes::_invokeinterface) {
    if (info.resolved_method()->method_holder() ==
                                            SystemDictionary::Object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also CallInfo::set_interface for details)
      assert(info.call_kind() == CallInfo::vtable_call ||
             info.call_kind() == CallInfo::direct_call, "");
      methodHandle rm = info.resolved_method();
      assert(rm->is_final() || info.has_vtable_index(),
             "should have been set already");
    } else if (!info.resolved_method()->has_itable_index()) {
      // Resolved something like CharSequence.toString.  Use vtable not itable.
      assert(info.call_kind() != CallInfo::itable_call, "");
    } else {
      // Setup itable entry
      assert(info.call_kind() == CallInfo::itable_call, "");
      int index = info.resolved_method()->itable_index();
      assert(info.itable_index() == index, "");
    }
  } else if (bytecode == Bytecodes::_invokespecial) {
    assert(info.call_kind() == CallInfo::direct_call, "must be direct call");
  } else {
    assert(info.call_kind() == CallInfo::direct_call ||
           info.call_kind() == CallInfo::vtable_call, "");
  }
#endif
  // Get sender or sender's host_klass, and only set cpCache entry to resolved if
  // it is not an interface.  The receiver for invokespecial calls within interface
  // methods must be checked for every call.
  InstanceKlass* sender = pool->pool_holder();
  sender = sender->has_host_klass() ? sender->host_klass() : sender;

  switch (info.call_kind()) {
  case CallInfo::direct_call:
    cp_cache_entry->set_direct_call(
      bytecode,
      info.resolved_method(),
      sender->is_interface());
    break;
  case CallInfo::vtable_call:
    cp_cache_entry->set_vtable_call(
      bytecode,
      info.resolved_method(),
      info.vtable_index());
    break;
  case CallInfo::itable_call:
    cp_cache_entry->set_itable_call(
      bytecode,
      info.resolved_klass(),
      info.resolved_method(),
      info.itable_index());
    break;
  default:  ShouldNotReachHere();
  }
}


// First time execution:  Resolve symbols, create a permanent MethodType object.
void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) {
  Thread* THREAD = thread;
  const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
  LastFrameAccessor last_frame(thread);

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  cp_cache_entry->set_method_handle(pool, info);
}

// First time execution:  Resolve symbols, create a permanent CallSite object.
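// The result is recorded in the invokedynamic-specific ConstantPoolCache entry
// (set_dynamic_call below), so later executions of the bytecode bypass this slow path.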
void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) {
  Thread* THREAD = thread;
  LastFrameAccessor last_frame(thread);
  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;

  //TO DO: consider passing BCI to Java.
  //  int caller_bci = last_frame.method()->bci_from(last_frame.bcp());

  // resolve method
  CallInfo info;
  constantPoolHandle pool(thread, last_frame.method()->constants());
  int index = last_frame.get_index_u4(bytecode);
  {
    JvmtiHideSingleStepping jhss(thread);
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 index, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index);
  cp_cache_entry->set_dynamic_call(pool, info);
}

// This function is the interface to the assembly code. It returns the resolved
// cpCache entry.  This doesn't safepoint, but the helper routines safepoint.
// This function will check for redefinition!
IRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode)) {
  switch (bytecode) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    resolve_get_put(thread, bytecode);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    resolve_invoke(thread, bytecode);
    break;
  case Bytecodes::_invokehandle:
    resolve_invokehandle(thread);
    break;
  case Bytecodes::_invokedynamic:
    resolve_invokedynamic(thread);
    break;
  default:
    fatal("unexpected bytecode: %s", Bytecodes::name(bytecode));
    break;
  }
}
IRT_END

//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
  if (branch_bcp != NULL && nm != NULL) {
    // This was a successful request for an OSR nmethod.  Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again.  It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    LastFrameAccessor last_frame(thread);
    Method* method = last_frame.method();
    int bci = method->bci_from(last_frame.bcp());
    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
  }
  if (nm != NULL && thread->is_interp_only_mode()) {
    // Normally we never get an nm if is_interp_only_mode() is true, because
    // policy()->event has a check for this and won't compile the method when
    // true. However, it's possible for is_interp_only_mode() to become true
    // during the compilation. We don't want to return the nm in that case
    // because we want to continue to execute interpreted.
    nm = NULL;
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != NULL) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}

IRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
    // monitors in the activation are biased because their
    // BasicObjectLocks will need to migrate during OSR. Force
    // unbiasing of all monitors in the activation now (even though
    // the OSR nmethod might be invalidated) because we don't have a
    // safepoint opportunity later once the migration begins.
    if (UseBiasedLocking) {
      ResourceMark rm;
      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
      for( BasicObjectLock *kptr = last_frame.monitor_end();
           kptr < last_frame.monitor_begin();
           kptr = last_frame.next_monitor(kptr) ) {
        if( kptr->obj() != NULL ) {
          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
        }
      }
      BiasedLocking::revoke(objects_to_revoke);
    }
  }
  return osr_nm;
IRT_END

IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == NULL)  return 0;
  return mdo->bci_to_di(bci);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  assert(ProfileInterpreter, "must be profiling interpreter");
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  Method::build_interpreter_method_data(method, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
    // and fall through...
  }
IRT_END


#ifdef ASSERT
IRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    ResetNoHandleMark rnm; // In a LEAF entry.
    HandleMark hm;
    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
IRT_END
#endif // ASSERT

IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(thread);
  HandleMark hm(thread);
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement.  This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != NULL, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
IRT_END

IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m))
  MethodCounters* mcs = Method::build_method_counters(m, thread);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
  }
  return mcs;
IRT_END


IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // IRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    LastFrameAccessor last_frame(thread);
    JvmtiExport::at_single_stepping_point(thread, last_frame.method(), last_frame.bcp());
  }
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass

  InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass());
  int index = cp_entry->field_index();
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  bool is_static = (obj == NULL);
  HandleMark hm(thread);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }
  InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_field_access(thread, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
                                                            oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

  Klass* k = cp_entry->f1_as_klass();

  // check the access_flags for the field in the klass
  InstanceKlass* ik = InstanceKlass::cast(k);
  int index = cp_entry->field_index();
  // bail out if field modifications are not watched
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
  case btos: sig_type = 'B'; break;
  case ztos: sig_type = 'Z'; break;
  case ctos: sig_type = 'C'; break;
  case stos: sig_type = 'S'; break;
  case itos: sig_type = 'I'; break;
  case ftos: sig_type = 'F'; break;
  case atos: sig_type = 'L'; break;
  case ltos: sig_type = 'J'; break;
  case dtos: sig_type = 'D'; break;
  default:  ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);

  HandleMark hm(thread);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks.  We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }

  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_raw_field_modification(thread, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
IRT_END

IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_entry(thread, last_frame.method(), last_frame.get_frame());
IRT_END


IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_exit(thread, last_frame.method(), last_frame.get_frame());
IRT_END

IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
IRT_END


// Implementation of SignatureHandlerLibrary

#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
// Dummy definition (else normalization method is defined in CPU
// dependent code)
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
  return fingerprint;
}
#endif

address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == NULL) {
    return NULL;
  }
  address handler = handler_blob->code_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != NULL) {
    return;
  }
  if (set_handler_blob() == NULL) {
    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->code_begin();

  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler   = _handler;
  int     insts_size = buffer->pure_insts_size();
  if (handler + insts_size > _handler_blob->code_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != NULL) {
    memcpy(handler, buffer->insts_begin(), insts_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, insts_size);
    _handler = handler + insts_size;
  }
  return handler;
}

void SignatureHandlerLibrary::add(const methodHandle& method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
      // lookup method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      // allow CPU dependent code to optimize the fingerprints for the fast handler
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler (without memorizing it in the fingerprints)
        } else {
          // debugging support
          if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
            ttyLocker ttyl;
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.insts_size());
            if (buffer.insts_size() > 0) {
              Disassembler::decode(handler, handler + buffer.insts_size());
            }
#ifndef PRODUCT
            address rh_begin = Interpreter::result_handler(method()->result_type());
            if (CodeCache::contains(rh_begin)) {
              // else it might be special platform dependent values
              tty->print_cr(" --- associated result handler ---");
              address rh_end = rh_begin;
              while (*(int*)rh_end != 0) {
                rh_end += sizeof(int);
              }
              Disassembler::decode(rh_begin, rh_end);
            } else {
              tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin));
            }
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
      // Set handler under SignatureHandlerLibrary_lock
      if (handler_index < 0) {
        // use generic signature handler
        method->set_signature_handler(Interpreter::slow_signature_handler());
      } else {
        // set handler
        method->set_signature_handler(_handlers->at(handler_index));
      }
    } else {
      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    }
  }
#ifdef ASSERT
  int handler_index = -1;
  int fingerprint_index = -2;
  {
    // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized
    // in any way if accessed from multiple threads. To avoid races with another
    // thread which may change the arrays in the above mutex-protected block, we
    // have to protect this read access here with the same mutex as well!
    MutexLocker mu(SignatureHandlerLibrary_lock);
    if (_handlers != NULL) {
      handler_index = _handlers->find(method->signature_handler());
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      fingerprint_index = _fingerprints->find(fingerprint);
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         handler_index == fingerprint_index, "sanity check");
#endif // ASSERT
}
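// Note on the caching scheme used by the add() routines: the method's
// signature is reduced to a 64-bit fingerprint, the fingerprint is looked up
// in a growable table under SignatureHandlerLibrary_lock, and only a miss
// pays for generating a new handler. A minimal sketch of that
// lookup-or-generate idiom, with hypothetical names and standard containers
// in place of the GrowableArrays used here (illustration only, not VM code):
//
//   #include <cstdint>
//   #include <map>
//   #include <mutex>
//
//   static std::map<uint64_t, void*> g_handler_cache;   // fingerprint -> handler
//   static std::mutex                g_handler_cache_lock;
//
//   void* lookup_or_generate(uint64_t fingerprint, void* (*generate)(uint64_t)) {
//     std::lock_guard<std::mutex> guard(g_handler_cache_lock);
//     auto it = g_handler_cache.find(fingerprint);
//     if (it != g_handler_cache.end()) {
//       return it->second;                     // hit: reuse the cached handler
//     }
//     void* handler = generate(fingerprint);   // miss: generate exactly once
//     if (handler != nullptr) {
//       g_handler_cache[fingerprint] = handler;
//     }
//     return handler;                          // nullptr -> caller uses the slow handler
//   }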
void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
address                  SignatureHandlerLibrary::_handler      = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
address                  SignatureHandlerLibrary::_buffer       = NULL;


IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  bool in_base_library;
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, in_base_library, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
IRT_END
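// The ordering comment in prepare_native_call() above describes a
// publish-last pattern: fill in every field a racing reader depends on
// first, and only then store the field that readers use as the "ready"
// check. A minimal sketch of the same idea with hypothetical names, using
// std::atomic for the release/acquire pairing (illustration only; the VM
// relies on its own ordering guarantees rather than std::atomic here):
//
//   #include <atomic>
//
//   struct NativeCallState {
//     void* entry  = nullptr;                  // native entry point
//     void* mirror = nullptr;                  // klass mirror
//     std::atomic<void*> handler{nullptr};     // readers test this first
//   };
//
//   void publish(NativeCallState& s, void* entry, void* mirror, void* handler) {
//     s.entry  = entry;
//     s.mirror = mirror;
//     // release store: a reader that observes a non-null handler is
//     // guaranteed to also see the entry and mirror written above
//     s.handler.store(handler, std::memory_order_release);
//   }
//
//   bool is_ready(const NativeCallState& s) {
//     return s.handler.load(std::memory_order_acquire) != nullptr;
//   }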
#if defined(IA32) || defined(AMD64) || defined(ARM)
IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResetNoHandleMark rnm; // In a LEAF entry.
  HandleMark hm;
  ResourceMark rm;
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(thread, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
IRT_END
#endif

#if INCLUDE_JVMTI
// This is support for the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
  Symbol* mname = cpool->name_ref_at(cp_index);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = (oop) member_name;
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    thread->set_vm_result(member_name_oop);
  } else {
    thread->set_vm_result(NULL);
  }
IRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be an IRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
IRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(thread, last_frame.method());
  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
IRT_END
#endif // !PRODUCT
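// For context on the JVMTI PopFrame support above (popframe_move_outgoing_args
// and member_name_arg_or_null): these exist so that an agent can pop the top
// interpreted frame and have the invoke re-executed when the thread resumes.
// A minimal sketch of the agent-side request, using only standard JVMTI calls
// (illustration only; error handling elided, the capability is normally added
// in Agent_OnLoad, and the target thread must already be suspended):
//
//   #include <string.h>
//   #include <jvmti.h>
//
//   void request_pop_frame(jvmtiEnv* jvmti, jthread thread) {
//     jvmtiCapabilities caps;
//     memset(&caps, 0, sizeof(caps));
//     caps.can_pop_frame = 1;
//     jvmti->AddCapabilities(&caps);   // enable PopFrame support
//     jvmti->PopFrame(thread);         // pop the topmost frame; the invoke is
//                                      // re-executed when the thread resumes
//   }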