/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "jvmci/jniAccessMark.inline.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized.

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}

// Manages a scope for a JVMCI runtime call that attempts a heap allocation.
// If there is a pending exception upon closing the scope and the runtime
// call is of the variety where allocation failure returns NULL without an
// exception, the following action is taken:
//   1. The pending exception is cleared
//   2. NULL is written to JavaThread::_vm_result
//   3. Any pending OutOfMemoryError is checked to be Universe::out_of_memory_error_retry().
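//
// The scope is only armed when the runtime call is made with null_on_fail == true
// (the 'activate' constructor argument); otherwise the mark is a no-op and any
// pending exception propagates as usual.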
class RetryableAllocationMark: public StackObj {
 private:
  JavaThread* _thread;
 public:
  RetryableAllocationMark(JavaThread* thread, bool activate) {
    if (activate) {
      assert(!thread->in_retryable_allocation(), "retryable allocation scope is non-reentrant");
      _thread = thread;
      _thread->set_in_retryable_allocation(true);
    } else {
      _thread = NULL;
    }
  }
  ~RetryableAllocationMark() {
    if (_thread != NULL) {
      _thread->set_in_retryable_allocation(false);
      JavaThread* THREAD = _thread;
      if (HAS_PENDING_EXCEPTION) {
        oop ex = PENDING_EXCEPTION;
        CLEAR_PENDING_EXCEPTION;
        oop retry_oome = Universe::out_of_memory_error_retry();
        if (ex->is_a(retry_oome->klass()) && retry_oome != ex) {
          ResourceMark rm;
          fatal("Unexpected exception in scope of retryable allocation: " INTPTR_FORMAT " of type %s", p2i(ex), ex->klass()->external_name());
        }
        _thread->set_vm_result(NULL);
      }
    }
  }
};

JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance_common(JavaThread* thread, Klass* klass, bool null_on_fail))
  JRT_BLOCK;
  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  {
    RetryableAllocationMark ram(thread, null_on_fail);
    h->check_valid_for_instantiation(true, CHECK);
    oop obj;
    if (null_on_fail) {
      if (!h->is_initialized()) {
        // Cannot re-execute class initialization without side effects
        // so return without attempting the initialization
        return;
      }
    } else {
      // make sure klass is initialized
      h->initialize(CHECK);
    }
    // allocate instance and return via TLS
    obj = h->allocate_instance(CHECK);
    thread->set_vm_result(obj);
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END

JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array_common(JavaThread* thread, Klass* array_klass, jint length, bool null_on_fail))
  JRT_BLOCK;
  // Note: no handle for klass needed since they are not used
  // anymore after new_objArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  oop obj;
  if (array_klass->is_typeArray_klass()) {
    BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
    RetryableAllocationMark ram(thread, null_on_fail);
    obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  } else {
    Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
    Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
    RetryableAllocationMark ram(thread, null_on_fail);
    obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  }
  thread->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    static int deopts = 0;
    // Alternate between deoptimizing and raising an error (which will also cause a deopt)
    if (deopts++ % 2 == 0) {
      if (null_on_fail) {
        return;
      } else {
        ResourceMark rm(THREAD);
        THROW(vmSymbols::java_lang_OutOfMemoryError());
      }
    } else {
      deopt_caller();
    }
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END

JRT_ENTRY(void, JVMCIRuntime::new_multi_array_common(JavaThread* thread, Klass* klass, int rank, jint* dims, bool null_on_fail))
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  RetryableAllocationMark ram(thread, null_on_fail);
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, JVMCIRuntime::dynamic_new_array_common(JavaThread* thread, oopDesc* element_mirror, jint length, bool null_on_fail))
  RetryableAllocationMark ram(thread, null_on_fail);
  oop obj = Reflection::reflect_new_array(element_mirror, length, CHECK);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, JVMCIRuntime::dynamic_new_instance_common(JavaThread* thread, oopDesc* type_mirror, bool null_on_fail))
  InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(type_mirror));

  if (klass == NULL) {
    ResourceMark rm(THREAD);
    THROW(vmSymbols::java_lang_InstantiationException());
  }
  RetryableAllocationMark ram(thread, null_on_fail);

  // Create new instance (the receiver)
  klass->check_valid_for_instantiation(false, CHECK);

  if (null_on_fail) {
    if (!klass->is_initialized()) {
      // Cannot re-execute class initialization without side effects
      // so return without attempting the initialization
      return;
    }
  } else {
    // Make sure klass gets initialized
    klass->initialize(CHECK);
  }

  oop obj = klass->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in a nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, CompiledMethod*& cm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  cm = CodeCache::find_compiled(pc);
  assert(cm != NULL, "this is not a compiled method");
  // Adjust the pc as needed.
  if (cm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(oopDesc::is_oop(exception()), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so. Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_guards_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here. If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site. This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method. Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      stringStream tempst;
      tempst.print("compiled method <%s>\n"
                   " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                   cm->method()->print_value_string(), p2i(pc), p2i(thread));
      Exceptions::log_exception(exception, tempst.as_string());
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only when another exception did not
    // occur during the computation of the compiled exception handler
    // (e.g., when loading the class of the catch type).
    // Checking for exception oop equality is not
    // sufficient because some exceptions are pre-allocated and reused.
    if (continuation != NULL && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
      cm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(cm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(thread), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from the exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  CompiledMethod* cm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, cm);
  }
  // Back in Java, use no oops, DON'T safepoint

  // Now check to see if the compiled method we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod.
  if (cm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}

JRT_BLOCK_ENTRY(void, JVMCIRuntime::monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  SharedRuntime::monitor_enter_helper(obj, lock, thread, JVMCIUseFastLocking);
JRT_END

JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  assert(oopDesc::is_oop(obj), "invalid lock object pointer detected");
  SharedRuntime::monitor_exit_helper(obj, lock, thread, JVMCIUseFastLocking);
JRT_END

// Object.notify() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread *thread, oopDesc* obj))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
      return true;
    }
  }
  return false; // caller must perform slow path

JRT_END

// Object.notifyAll() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread *thread, oopDesc* obj))

  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
      return true;
    }
  }
  return false; // caller must perform slow path

JRT_END

JRT_BLOCK_ENTRY(int, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message))
  JRT_BLOCK;
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK_EXIT_(0));
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
  JRT_BLOCK_END;
  return caller_is_deopted();
JRT_END

JRT_BLOCK_ENTRY(int, JVMCIRuntime::throw_klass_external_name_exception(JavaThread* thread, const char* exception, Klass* klass))
  JRT_BLOCK;
  ResourceMark rm(thread);
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK_EXIT_(0));
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, klass->external_name());
  JRT_BLOCK_END;
  return caller_is_deopted();
JRT_END

JRT_BLOCK_ENTRY(int, JVMCIRuntime::throw_class_cast_exception(JavaThread* thread, const char* exception, Klass* caster_klass, Klass* target_klass))
  JRT_BLOCK;
  ResourceMark rm(thread);
  const char* message = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK_EXIT_(0));
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
  JRT_BLOCK_END;
  return caller_is_deopted();
JRT_END

JRT_LEAF(void, JVMCIRuntime::log_object(JavaThread* thread, oopDesc* obj, bool as_string, bool newline))
  ttyLocker ttyl;

  if (obj == NULL) {
    tty->print("NULL");
  } else if (oopDesc::is_oop_or_null(obj, true) && (!as_string || !java_lang_String::is_instance(obj))) {
    if (oopDesc::is_oop_or_null(obj, true)) {
      char buf[O_BUFLEN];
      tty->print("%s@" INTPTR_FORMAT, obj->klass()->name()->as_C_string(buf, O_BUFLEN), p2i(obj));
    } else {
      tty->print(INTPTR_FORMAT, p2i(obj));
    }
  } else {
    ResourceMark rm;
    assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
    char *buf = java_lang_String::as_utf8_string(obj);
    tty->print_raw(buf);
  }
  if (newline) {
    tty->cr();
  }
JRT_END

#if INCLUDE_G1GC

JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
  G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
JRT_END

JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
  G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
JRT_END

#endif // INCLUDE_G1GC

JRT_LEAF(jboolean, JVMCIRuntime::validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child))
  bool ret = true;
  if (!Universe::heap()->is_in_closed_subset(parent)) {
    tty->print_cr("Parent Object " INTPTR_FORMAT " not in heap", p2i(parent));
    parent->print();
    ret = false;
  }
  if (!Universe::heap()->is_in_closed_subset(child)) {
    tty->print_cr("Child Object " INTPTR_FORMAT " not in heap", p2i(child));
    child->print();
    ret = false;
  }
  return (jint)ret;
JRT_END

JRT_ENTRY(void, JVMCIRuntime::vm_error(JavaThread* thread, jlong where, jlong format, jlong value))
  ResourceMark rm;
  const char *error_msg = where == 0L ? "<internal JVMCI error>" : (char*) (address) where;
  char *detail_msg = NULL;
  if (format != 0L) {
    const char* buf = (char*) (address) format;
    size_t detail_msg_length = strlen(buf) * 2;
    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
    jio_snprintf(detail_msg, detail_msg_length, buf, value);
  }
  report_vm_error(__FILE__, __LINE__, error_msg, "%s", detail_msg);
JRT_END

JRT_LEAF(oopDesc*, JVMCIRuntime::load_and_clear_exception(JavaThread* thread))
  oop exception = thread->exception_oop();
  assert(exception != NULL, "npe");
  thread->set_exception_oop(NULL);
  thread->set_exception_pc(0);
  return exception;
JRT_END

PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
JRT_LEAF(void, JVMCIRuntime::log_printf(JavaThread* thread, const char* format, jlong v1, jlong v2, jlong v3))
  ResourceMark rm;
  tty->print(format, v1, v2, v3);
JRT_END
PRAGMA_DIAG_POP

static void decipher(jlong v, bool ignoreZero) {
  if (v != 0 || !ignoreZero) {
    void* p = (void *)(address) v;
    CodeBlob* cb = CodeCache::find_blob(p);
    if (cb) {
      if (cb->is_nmethod()) {
        char buf[O_BUFLEN];
        tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod_or_null()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin()));
        return;
      }
      cb->print_value_on(tty);
      return;
    }
    if (Universe::heap()->is_in(p)) {
      oop obj = oop(p);
      obj->print_value_on(tty);
      return;
    }
    tty->print(INTPTR_FORMAT " [long: " JLONG_FORMAT ", double %lf, char %c]", p2i((void *)v), (jlong)v, (jdouble)v, (char)v);
  }
}

PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
JRT_LEAF(void, JVMCIRuntime::vm_message(jboolean vmError, jlong format, jlong v1, jlong v2, jlong v3))
  ResourceMark rm;
  const char *buf = (const char*) (address) format;
  if (vmError) {
    if (buf != NULL) {
      fatal(buf, v1, v2, v3);
    } else {
      fatal("<anonymous error>");
    }
  } else if (buf != NULL) {
    tty->print(buf, v1, v2, v3);
  } else {
    assert(v2 == 0, "v2 != 0");
    assert(v3 == 0, "v3 != 0");
    decipher(v1, false);
  }
JRT_END
PRAGMA_DIAG_POP

JRT_LEAF(void, JVMCIRuntime::log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
  union {
    jlong l;
    jdouble d;
    jfloat f;
  } uu;
  uu.l = value;
  switch (typeChar) {
    case 'Z': tty->print(value == 0 ? "false" : "true"); break;
    case 'B': tty->print("%d", (jbyte) value); break;
    case 'C': tty->print("%c", (jchar) value); break;
    case 'S': tty->print("%d", (jshort) value); break;
    case 'I': tty->print("%d", (jint) value); break;
    case 'F': tty->print("%f", uu.f); break;
    case 'J': tty->print(JLONG_FORMAT, value); break;
    case 'D': tty->print("%lf", uu.d); break;
    default: assert(false, "unknown typeChar"); break;
  }
  if (newline) {
    tty->cr();
  }
JRT_END

JRT_ENTRY(jint, JVMCIRuntime::identity_hash_code(JavaThread* thread, oopDesc* obj))
  return (jint) obj->identity_hash();
JRT_END

JRT_ENTRY(jboolean, JVMCIRuntime::thread_is_interrupted(JavaThread* thread, oopDesc* receiver, jboolean clear_interrupted))
  Handle receiverHandle(thread, receiver);
  // A nested ThreadsListHandle may require the Threads_lock which
  // requires thread_in_vm which is why this method cannot be JRT_LEAF.
  ThreadsListHandle tlh;

  JavaThread* receiverThread = java_lang_Thread::thread(receiverHandle());
  if (receiverThread == NULL || (EnableThreadSMRExtraValidityChecks && !tlh.includes(receiverThread))) {
    // The other thread may exit during this process, which is ok so return false.
    return JNI_FALSE;
  } else {
    return (jint) Thread::is_interrupted(receiverThread, clear_interrupted != 0);
  }
JRT_END

JRT_ENTRY(jint, JVMCIRuntime::test_deoptimize_call_int(JavaThread* thread, int value))
  deopt_caller();
  return (jint) value;
JRT_END


// private static JVMCIRuntime JVMCI.initializeRuntime()
JVM_ENTRY_NO_ENV(jobject, JVM_GetJVMCIRuntime(JNIEnv *env, jclass c))
  JNI_JVMCIENV(thread, env);
  if (!EnableJVMCI) {
    JVMCI_THROW_MSG_NULL(InternalError, "JVMCI is not enabled");
  }
  JVMCIENV->runtime()->initialize_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
  JVMCIObject runtime = JVMCIENV->runtime()->get_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
  return JVMCIENV->get_jobject(runtime);
JVM_END

void JVMCIRuntime::call_getCompiler(TRAPS) {
  THREAD_JVMCIENV(JavaThread::current());
  JVMCIObject jvmciRuntime = JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_CHECK);
  initialize(JVMCIENV);
  JVMCIENV->call_HotSpotJVMCIRuntime_getCompiler(jvmciRuntime, JVMCI_CHECK);
}

void JVMCINMethodData::initialize(
  int nmethod_mirror_index,
  const char* name,
  FailedSpeculation** failed_speculations)
{
  _failed_speculations = failed_speculations;
  _nmethod_mirror_index = nmethod_mirror_index;
  if (name != NULL) {
    _has_name = true;
    char* dest = (char*) this->name();
    strcpy(dest, name);
  } else {
    _has_name = false;
  }
}

void JVMCINMethodData::add_failed_speculation(nmethod* nm, jlong speculation) {
  uint index = (speculation >> 32) & 0xFFFFFFFF;
  int length = (int) speculation;
  if (index + length > (uint) nm->speculations_size()) {
    fatal(INTPTR_FORMAT "[index: %d, length: %d] out of bounds wrt encoded speculations of length %u", speculation, index, length, nm->speculations_size());
  }
  address data = nm->speculations_begin() + index;
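  // Record the failed speculation (a slice of this nmethod's speculation data,
  // located by the index/length pair encoded in 'speculation') on the list
  // shared with the compiler.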
  FailedSpeculation::add_failed_speculation(nm, _failed_speculations, data, length);
}

oop JVMCINMethodData::get_nmethod_mirror(nmethod* nm, bool phantom_ref) {
  if (_nmethod_mirror_index == -1) {
    return NULL;
  }
  if (phantom_ref) {
    return nm->oop_at_phantom(_nmethod_mirror_index);
  } else {
    return nm->oop_at(_nmethod_mirror_index);
  }
}

void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
  assert(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
  oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
  assert(new_mirror != NULL, "use clear_nmethod_mirror to clear the mirror");
  assert(*addr == NULL, "cannot overwrite non-null mirror");

  *addr = new_mirror;

  // Since we've patched some oops in the nmethod,
  // (re)register it with the heap.
  Universe::heap()->register_nmethod(nm);
}

void JVMCINMethodData::clear_nmethod_mirror(nmethod* nm) {
  if (_nmethod_mirror_index != -1) {
    oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
    *addr = NULL;
  }
}

void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
  oop nmethod_mirror = get_nmethod_mirror(nm, /* phantom_ref */ false);
  if (nmethod_mirror == NULL) {
    return;
  }

  // Update the values in the mirror if it still refers to nm.
  // We cannot use JVMCIObject to wrap the mirror as this is called
  // during GC, forbidding the creation of JNIHandles.
  JVMCIEnv* jvmciEnv = NULL;
  nmethod* current = (nmethod*) HotSpotJVMCI::InstalledCode::address(jvmciEnv, nmethod_mirror);
  if (nm == current) {
    if (!nm->is_alive()) {
      // Break the link from the mirror to nm such that
      // future invocations via the mirror will result in
      // an InvalidInstalledCodeException.
      HotSpotJVMCI::InstalledCode::set_address(jvmciEnv, nmethod_mirror, 0);
      HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
    } else if (nm->is_not_entrant()) {
      // Zero the entry point so any new invocation will fail but keep
      // the address link around so that existing activations can
      // be deoptimized via the mirror (i.e. JVMCIEnv::invalidate_installed_code).
      HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
    }
  }
}

void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
  if (is_HotSpotJVMCIRuntime_initialized()) {
    if (JVMCIENV->is_hotspot() && UseJVMCINativeLibrary) {
      JVMCI_THROW_MSG(InternalError, "JVMCI has already been enabled in the JVMCI shared library");
    }
  }

  initialize(JVMCIENV);

  // This should only be called in the context of the JVMCI class being initialized
  JVMCIObject result = JVMCIENV->call_HotSpotJVMCIRuntime_runtime(JVMCI_CHECK);

  _HotSpotJVMCIRuntime_instance = JVMCIENV->make_global(result);
}

void JVMCIRuntime::initialize(JVMCIEnv* JVMCIENV) {
  assert(this != NULL, "sanity");
  // Check first without JVMCI_lock
  if (_initialized) {
    return;
  }

  MutexLocker locker(JVMCI_lock);
  // Check again under JVMCI_lock
  if (_initialized) {
    return;
  }

  while (_being_initialized) {
    JVMCI_lock->wait();
    if (_initialized) {
      return;
    }
  }

  _being_initialized = true;

  {
    MutexUnlocker unlock(JVMCI_lock);

    HandleMark hm;
    ResourceMark rm;
    JavaThread* THREAD = JavaThread::current();
    if (JVMCIENV->is_hotspot()) {
      HotSpotJVMCI::compute_offsets(CHECK_EXIT);
    } else {
      JNIAccessMark jni(JVMCIENV);

      JNIJVMCI::initialize_ids(jni.env());
      if (jni()->ExceptionCheck()) {
        jni()->ExceptionDescribe();
        fatal("JNI exception during init");
      }
    }
    create_jvmci_primitive_type(T_BOOLEAN, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_BYTE, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_CHAR, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_SHORT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_INT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_LONG, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_FLOAT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_DOUBLE, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_VOID, JVMCI_CHECK_EXIT_((void)0));

    if (!JVMCIENV->is_hotspot()) {
      JVMCIENV->copy_saved_properties();
    }
  }

  _initialized = true;
  _being_initialized = false;
  JVMCI_lock->notify_all();
}

JVMCIObject JVMCIRuntime::create_jvmci_primitive_type(BasicType type, JVMCI_TRAPS) {
  Thread* THREAD = Thread::current();
  // These primitive types are long lived and are created before the runtime is fully set up
  // so skip registering them for scanning.
  JVMCIObject mirror = JVMCIENV->get_object_constant(java_lang_Class::primitive_mirror(type), false, true);
  if (JVMCIENV->is_hotspot()) {
    JavaValue result(T_OBJECT);
    JavaCallArguments args;
    args.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(mirror)));
    args.push_int(type2char(type));
    JavaCalls::call_static(&result, HotSpotJVMCI::HotSpotResolvedPrimitiveType::klass(), vmSymbols::fromMetaspace_name(), vmSymbols::primitive_fromMetaspace_signature(), &args, CHECK_(JVMCIObject()));

    return JVMCIENV->wrap(JNIHandles::make_local((oop)result.get_jobject()));
  } else {
    JNIAccessMark jni(JVMCIENV);
    jobject result = jni()->CallStaticObjectMethod(JNIJVMCI::HotSpotResolvedPrimitiveType::clazz(),
                                                   JNIJVMCI::HotSpotResolvedPrimitiveType_fromMetaspace_method(),
                                                   mirror.as_jobject(), type2char(type));
    if (jni()->ExceptionCheck()) {
      return JVMCIObject();
    }
    return JVMCIENV->wrap(result);
  }
}

void JVMCIRuntime::initialize_JVMCI(JVMCI_TRAPS) {
  if (!is_HotSpotJVMCIRuntime_initialized()) {
    initialize(JVMCI_CHECK);
    JVMCIENV->call_JVMCI_getRuntime(JVMCI_CHECK);
  }
}

JVMCIObject JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
  initialize(JVMCIENV);
  initialize_JVMCI(JVMCI_CHECK_(JVMCIObject()));
  return _HotSpotJVMCIRuntime_instance;
}


// private void CompilerToVM.registerNatives()
JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))

#ifdef _LP64
#ifndef TARGET_ARCH_sparc
  uintptr_t heap_end = (uintptr_t) Universe::heap()->reserved_region().end();
  uintptr_t allocation_end = heap_end + ((uintptr_t)16) * 1024 * 1024 * 1024;
  guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
#endif // TARGET_ARCH_sparc
#else
  fatal("check TLAB allocation code for address space conflicts");
#endif

  JNI_JVMCIENV(thread, env);

  if (!EnableJVMCI) {
    JVMCI_THROW_MSG(InternalError, "JVMCI is not enabled");
  }

  JVMCIENV->runtime()->initialize(JVMCIENV);

  {
    ResourceMark rm;
    HandleMark hm(thread);
    ThreadToNativeFromVM trans(thread);

    // Ensure _non_oop_bits is initialized
    Universe::non_oop_word();

    if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods, CompilerToVM::methods_count())) {
      if (!env->ExceptionCheck()) {
        for (int i = 0; i < CompilerToVM::methods_count(); i++) {
          if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods + i, 1)) {
            guarantee(false, "Error registering JNI method %s%s", CompilerToVM::methods[i].name, CompilerToVM::methods[i].signature);
            break;
          }
        }
      } else {
        env->ExceptionDescribe();
      }
      guarantee(false, "Failed registering CompilerToVM native methods");
    }
  }
JVM_END


void JVMCIRuntime::shutdown() {
  if (is_HotSpotJVMCIRuntime_initialized()) {
    _shutdown_called = true;

    THREAD_JVMCIENV(JavaThread::current());
    JVMCIENV->call_HotSpotJVMCIRuntime_shutdown(_HotSpotJVMCIRuntime_instance);
  }
}

void JVMCIRuntime::bootstrap_finished(TRAPS) {
  if (is_HotSpotJVMCIRuntime_initialized()) {
    THREAD_JVMCIENV(JavaThread::current());
    JVMCIENV->call_HotSpotJVMCIRuntime_bootstrapFinished(_HotSpotJVMCIRuntime_instance, JVMCIENV);
  }
}

void JVMCIRuntime::describe_pending_hotspot_exception(JavaThread* THREAD, bool clear) {
  if (HAS_PENDING_EXCEPTION) {
    Handle exception(THREAD, PENDING_EXCEPTION);
    const char* exception_file = THREAD->exception_file();
    int exception_line = THREAD->exception_line();
    CLEAR_PENDING_EXCEPTION;
    if (exception->is_a(SystemDictionary::ThreadDeath_klass())) {
      // Don't print anything if we are being killed.
    } else {
      java_lang_Throwable::print_stack_trace(exception, tty);

      // Clear and ignore any exceptions raised during printing
      CLEAR_PENDING_EXCEPTION;
    }
    if (!clear) {
      THREAD->set_pending_exception(exception(), exception_file, exception_line);
    }
  }
}


void JVMCIRuntime::exit_on_pending_exception(JVMCIEnv* JVMCIENV, const char* message) {
  JavaThread* THREAD = JavaThread::current();

  static volatile int report_error = 0;
  if (!report_error && Atomic::cmpxchg(1, &report_error, 0) == 0) {
    // Only report an error once
    tty->print_raw_cr(message);
    if (JVMCIENV != NULL) {
      JVMCIENV->describe_pending_exception(true);
    } else {
      describe_pending_hotspot_exception(THREAD, true);
    }
  } else {
    // Allow error reporting thread to print the stack trace. Windows
    // doesn't allow uninterruptible wait for JavaThreads
    const bool interruptible = true;
    os::sleep(THREAD, 200, interruptible);
  }

  before_exit(THREAD);
  vm_exit(-1);
}

// ------------------------------------------------------------------
// Note: the logic of this method should mirror the logic of
// constantPoolOopDesc::verify_constant_pool_resolve.
bool JVMCIRuntime::check_klass_accessibility(Klass* accessing_klass, Klass* resolved_klass) {
  if (accessing_klass->is_objArray_klass()) {
    accessing_klass = ObjArrayKlass::cast(accessing_klass)->bottom_klass();
  }
  if (!accessing_klass->is_instance_klass()) {
    return true;
  }

  if (resolved_klass->is_objArray_klass()) {
    // Find the element klass, if this is an array.
    resolved_klass = ObjArrayKlass::cast(resolved_klass)->bottom_klass();
  }
  if (resolved_klass->is_instance_klass()) {
    Reflection::VerifyClassAccessResults result =
      Reflection::verify_class_access(accessing_klass, InstanceKlass::cast(resolved_klass), true);
    return result == Reflection::ACCESS_OK;
  }
  return true;
}

// ------------------------------------------------------------------
Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass,
                                            const constantPoolHandle& cpool,
                                            Symbol* sym,
                                            bool require_local) {
  JVMCI_EXCEPTION_CONTEXT;

  // Now we need to check the SystemDictionary
  if (sym->byte_at(0) == 'L' &&
      sym->byte_at(sym->utf8_length()-1) == ';') {
    // This is a name from a signature. Strip off the trimmings.
    // Call recursive to keep scope of strippedsym.
    TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
                                                        sym->utf8_length()-2,
                                                        CHECK_NULL);
    return get_klass_by_name_impl(accessing_klass, cpool, strippedsym, require_local);
  }

  Handle loader(THREAD, (oop)NULL);
  Handle domain(THREAD, (oop)NULL);
  if (accessing_klass != NULL) {
    loader = Handle(THREAD, accessing_klass->class_loader());
    domain = Handle(THREAD, accessing_klass->protection_domain());
  }

  Klass* found_klass;
  {
    ttyUnlocker ttyul; // release tty lock to avoid ordering problems
    MutexLocker ml(Compile_lock);
    if (!require_local) {
      found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL);
    } else {
      found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL);
    }
  }

  // If we fail to find an array klass, look again for its element type.
  // The element type may be available either locally or via constraints.
  // In either case, if we can find the element type in the system dictionary,
  // we must build an array type around it. The CI requires array klasses
  // to be loaded if their element klasses are loaded, except when memory
  // is exhausted.
  if (sym->byte_at(0) == '[' &&
      (sym->byte_at(1) == '[' || sym->byte_at(1) == 'L')) {
    // We have an unloaded array.
    // Build it on the fly if the element class exists.
    TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
                                                     sym->utf8_length()-1,
                                                     CHECK_NULL);

    // Get element Klass recursively.
    Klass* elem_klass =
      get_klass_by_name_impl(accessing_klass,
                             cpool,
                             elem_sym,
                             require_local);
    if (elem_klass != NULL) {
      // Now make an array for it
      return elem_klass->array_klass(CHECK_NULL);
    }
  }

  if (found_klass == NULL && !cpool.is_null() && cpool->has_preresolution()) {
    // Look inside the constant pool for pre-resolved class entries.
    for (int i = cpool->length() - 1; i >= 1; i--) {
      if (cpool->tag_at(i).is_klass()) {
        Klass* kls = cpool->resolved_klass_at(i);
        if (kls->name() == sym) {
          return kls;
        }
      }
    }
  }

  return found_klass;
}

// ------------------------------------------------------------------
Klass* JVMCIRuntime::get_klass_by_name(Klass* accessing_klass,
                                       Symbol* klass_name,
                                       bool require_local) {
  ResourceMark rm;
  constantPoolHandle cpool;
  return get_klass_by_name_impl(accessing_klass,
                                cpool,
                                klass_name,
                                require_local);
}

// ------------------------------------------------------------------
// Implementation of get_klass_by_index.
Klass* JVMCIRuntime::get_klass_by_index_impl(const constantPoolHandle& cpool,
                                             int index,
                                             bool& is_accessible,
                                             Klass* accessor) {
  JVMCI_EXCEPTION_CONTEXT;
  Klass* klass = ConstantPool::klass_at_if_loaded(cpool, index);
  Symbol* klass_name = NULL;
  if (klass == NULL) {
    klass_name = cpool->klass_name_at(index);
  }

  if (klass == NULL) {
    // Not found in constant pool. Use the name to do the lookup.
    Klass* k = get_klass_by_name_impl(accessor,
                                      cpool,
                                      klass_name,
                                      false);
    // Calculate accessibility the hard way.
    if (k == NULL) {
      is_accessible = false;
    } else if (k->class_loader() != accessor->class_loader() &&
               get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
      // Loaded only remotely. Not linked yet.
      is_accessible = false;
    } else {
      // Linked locally, and we must also check public/private, etc.
      is_accessible = check_klass_accessibility(accessor, k);
    }
    if (!is_accessible) {
      return NULL;
    }
    return k;
  }

  // It is known to be accessible, since it was found in the constant pool.
  is_accessible = true;
  return klass;
}

// ------------------------------------------------------------------
// Get a klass from the constant pool.
Klass* JVMCIRuntime::get_klass_by_index(const constantPoolHandle& cpool,
                                        int index,
                                        bool& is_accessible,
                                        Klass* accessor) {
  ResourceMark rm;
  Klass* result = get_klass_by_index_impl(cpool, index, is_accessible, accessor);
  return result;
}

// ------------------------------------------------------------------
// Implementation of get_field_by_index.
//
// Implementation note: the results of field lookups are cached
// in the accessor klass.
void JVMCIRuntime::get_field_by_index_impl(InstanceKlass* klass, fieldDescriptor& field_desc,
                                           int index) {
  JVMCI_EXCEPTION_CONTEXT;

  assert(klass->is_linked(), "must be linked before using its constant-pool");

  constantPoolHandle cpool(thread, klass->constants());

  // Get the field's name, signature, and type.
  Symbol* name = cpool->name_ref_at(index);

  int nt_index = cpool->name_and_type_ref_index_at(index);
  int sig_index = cpool->signature_ref_index_at(nt_index);
  Symbol* signature = cpool->symbol_at(sig_index);

  // Get the field's declared holder.
  int holder_index = cpool->klass_ref_index_at(index);
  bool holder_is_accessible;
  Klass* declared_holder = get_klass_by_index(cpool, holder_index,
                                              holder_is_accessible,
                                              klass);

  // The declared holder of this field may not have been loaded.
  // Bail out with partial field information.
  if (!holder_is_accessible) {
    return;
  }


  // Perform the field lookup.
  Klass* canonical_holder =
    InstanceKlass::cast(declared_holder)->find_field(name, signature, &field_desc);
  if (canonical_holder == NULL) {
    return;
  }

  assert(canonical_holder == field_desc.field_holder(), "just checking");
}

// ------------------------------------------------------------------
// Get a field by index from a klass's constant pool.
void JVMCIRuntime::get_field_by_index(InstanceKlass* accessor, fieldDescriptor& fd, int index) {
  ResourceMark rm;
  return get_field_by_index_impl(accessor, fd, index);
}

// ------------------------------------------------------------------
// Perform an appropriate method lookup based on accessor, holder,
// name, signature, and bytecode.
methodHandle JVMCIRuntime::lookup_method(InstanceKlass* accessor,
                                         Klass* holder,
                                         Symbol* name,
                                         Symbol* sig,
                                         Bytecodes::Code bc,
                                         constantTag tag) {
  // Accessibility checks are performed in JVMCIEnv::get_method_by_index_impl().
  assert(check_klass_accessibility(accessor, holder), "holder not accessible");

  methodHandle dest_method;
  LinkInfo link_info(holder, name, sig, accessor, LinkInfo::needs_access_check, tag);
  switch (bc) {
    case Bytecodes::_invokestatic:
      dest_method =
        LinkResolver::resolve_static_call_or_null(link_info);
      break;
    case Bytecodes::_invokespecial:
      dest_method =
        LinkResolver::resolve_special_call_or_null(link_info);
      break;
    case Bytecodes::_invokeinterface:
      dest_method =
        LinkResolver::linktime_resolve_interface_method_or_null(link_info);
      break;
    case Bytecodes::_invokevirtual:
      dest_method =
        LinkResolver::linktime_resolve_virtual_method_or_null(link_info);
      break;
    default: ShouldNotReachHere();
  }

  return dest_method;
}


// ------------------------------------------------------------------
methodHandle JVMCIRuntime::get_method_by_index_impl(const constantPoolHandle& cpool,
                                                    int index, Bytecodes::Code bc,
                                                    InstanceKlass* accessor) {
  if (bc == Bytecodes::_invokedynamic) {
    ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
    bool is_resolved = !cpce->is_f1_null();
    if (is_resolved) {
      // Get the invoker Method* from the constant pool.
      // (The appendix argument, if any, will be noted in the method's signature.)
      Method* adapter = cpce->f1_as_method();
      return methodHandle(adapter);
    }

    return NULL;
  }

  int holder_index = cpool->klass_ref_index_at(index);
  bool holder_is_accessible;
  Klass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);

  // Get the method's name and signature.
  Symbol* name_sym = cpool->name_ref_at(index);
  Symbol* sig_sym = cpool->signature_ref_at(index);

  if (cpool->has_preresolution()
      || ((holder == SystemDictionary::MethodHandle_klass() || holder == SystemDictionary::VarHandle_klass()) &&
          MethodHandles::is_signature_polymorphic_name(holder, name_sym))) {
    // Short-circuit lookups for JSR 292-related call sites.
    // That is, do not rely only on name-based lookups, because they may fail
    // if the names are not resolvable in the boot class loader (7056328).
    switch (bc) {
      case Bytecodes::_invokevirtual:
      case Bytecodes::_invokeinterface:
      case Bytecodes::_invokespecial:
      case Bytecodes::_invokestatic:
        {
          Method* m = ConstantPool::method_at_if_loaded(cpool, index);
          if (m != NULL) {
            return m;
          }
        }
        break;
      default:
        break;
    }
  }

  if (holder_is_accessible) { // Our declared holder is loaded.
    constantTag tag = cpool->tag_ref_at(index);
    methodHandle m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
    if (!m.is_null()) {
      // We found the method.
      return m;
    }
  }

  // Either the declared holder was not loaded, or the method could
  // not be found.

  return NULL;
}

// ------------------------------------------------------------------
InstanceKlass* JVMCIRuntime::get_instance_klass_for_declared_method_holder(Klass* method_holder) {
  // For the case of <array>.clone(), the method holder can be an ArrayKlass*
  // instead of an InstanceKlass*. For that case simply pretend that the
  // declared holder is Object.clone since that's where the call will bottom out.
  if (method_holder->is_instance_klass()) {
    return InstanceKlass::cast(method_holder);
  } else if (method_holder->is_array_klass()) {
    return InstanceKlass::cast(SystemDictionary::Object_klass());
  } else {
    ShouldNotReachHere();
  }
  return NULL;
}


// ------------------------------------------------------------------
methodHandle JVMCIRuntime::get_method_by_index(const constantPoolHandle& cpool,
                                               int index, Bytecodes::Code bc,
                                               InstanceKlass* accessor) {
  ResourceMark rm;
  return get_method_by_index_impl(cpool, index, bc, accessor);
}

// ------------------------------------------------------------------
// Check for changes to the system dictionary during compilation:
// class loads, evolution, breakpoints.
JVMCI::CodeInstallResult JVMCIRuntime::validate_compile_task_dependencies(Dependencies* dependencies, JVMCICompileState* compile_state, char** failure_detail) {
  // If JVMTI capabilities were enabled during compile, the compilation is invalidated.
  if (compile_state != NULL && compile_state->jvmti_state_changed()) {
    *failure_detail = (char*) "Jvmti state change during compilation invalidated dependencies";
    return JVMCI::dependencies_failed;
  }

  // Dependencies must be checked when the system dictionary changes
  // or if we don't know whether it has changed (i.e., compile_state == NULL).
  bool counter_changed = compile_state == NULL || compile_state->system_dictionary_modification_counter() != SystemDictionary::number_of_modifications();
  CompileTask* task = compile_state == NULL ? NULL : compile_state->task();
  Dependencies::DepType result = dependencies->validate_dependencies(task, counter_changed, failure_detail);
  if (result == Dependencies::end_marker) {
    return JVMCI::ok;
  }

  if (!Dependencies::is_klass_type(result) || counter_changed) {
    return JVMCI::dependencies_failed;
  }
  // The dependencies were invalid at the time of installation
  // without any intervening modification of the system
  // dictionary. That means they were invalidly constructed.
  return JVMCI::dependencies_invalid;
}

// Reports a pending exception and exits the VM.
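// Only the first caller to reach this function prints the pending exception;
// any racing callers skip the reporting but still exit the VM.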
static void fatal_exception_in_compile(JVMCIEnv* JVMCIENV, JavaThread* thread, const char* msg) {
  // Only report a fatal JVMCI compilation exception once
  static volatile int report_init_failure = 0;
  if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) {
    tty->print_cr("%s:", msg);
    JVMCIENV->describe_pending_exception(true);
  }
  JVMCIENV->clear_pending_exception();
  before_exit(thread);
  vm_exit(-1);
}

void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, const methodHandle& method, int entry_bci) {
  JVMCI_EXCEPTION_CONTEXT

  JVMCICompileState* compile_state = JVMCIENV->compile_state();

  bool is_osr = entry_bci != InvocationEntryBci;
  if (compiler->is_bootstrapping() && is_osr) {
    // no OSR compilations during bootstrap - the compiler is just too slow at this point,
    // and we know that there are no endless loops
    compile_state->set_failure(true, "No OSR during bootstrap");
    return;
  }
  if (JVMCI::shutdown_called()) {
    compile_state->set_failure(false, "Avoiding compilation during shutdown");
    return;
  }

  HandleMark hm;
  JVMCIObject receiver = get_HotSpotJVMCIRuntime(JVMCIENV);
  if (JVMCIENV->has_pending_exception()) {
    fatal_exception_in_compile(JVMCIENV, thread, "Exception during HotSpotJVMCIRuntime initialization");
  }
  JVMCIObject jvmci_method = JVMCIENV->get_jvmci_method(method, JVMCIENV);
  if (JVMCIENV->has_pending_exception()) {
    JVMCIENV->describe_pending_exception(true);
    compile_state->set_failure(false, "exception getting JVMCI wrapper method");
    return;
  }

  JVMCIObject result_object = JVMCIENV->call_HotSpotJVMCIRuntime_compileMethod(receiver, jvmci_method, entry_bci,
                                                                               (jlong) compile_state, compile_state->task()->compile_id());
  if (!JVMCIENV->has_pending_exception()) {
    if (result_object.is_non_null()) {
      JVMCIObject failure_message = JVMCIENV->get_HotSpotCompilationRequestResult_failureMessage(result_object);
      if (failure_message.is_non_null()) {
        // Copy failure reason into resource memory first ...
        const char* failure_reason = JVMCIENV->as_utf8_string(failure_message);
        // ... and then into the C heap.
        failure_reason = os::strdup(failure_reason, mtJVMCI);
        bool retryable = JVMCIENV->get_HotSpotCompilationRequestResult_retry(result_object) != 0;
        compile_state->set_failure(retryable, failure_reason, true);
      } else {
        if (compile_state->task()->code() == NULL) {
          compile_state->set_failure(true, "no nmethod produced");
        } else {
          compile_state->task()->set_num_inlined_bytecodes(JVMCIENV->get_HotSpotCompilationRequestResult_inlinedBytecodes(result_object));
          compiler->inc_methods_compiled();
        }
      }
    } else {
      assert(false, "JVMCICompiler.compileMethod should always return non-null");
    }
  } else {
    // An uncaught exception here implies failure during compiler initialization.
    // The only sensible thing to do here is to exit the VM.
    fatal_exception_in_compile(JVMCIENV, thread, "Exception during JVMCI compiler initialization");
  }
  if (compiler->is_bootstrapping()) {
    compiler->set_bootstrap_compilation_request_handled();
  }
}


// ------------------------------------------------------------------
JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
                                                       const methodHandle& method,
                                                       nmethod*& nm,
                                                       int entry_bci,
                                                       CodeOffsets* offsets,
                                                       int orig_pc_offset,
                                                       CodeBuffer* code_buffer,
                                                       int frame_words,
                                                       OopMapSet* oop_map_set,
                                                       ExceptionHandlerTable* handler_table,
                                                       ImplicitExceptionTable* implicit_exception_table,
                                                       AbstractCompiler* compiler,
                                                       DebugInformationRecorder* debug_info,
                                                       Dependencies* dependencies,
                                                       int compile_id,
                                                       bool has_unsafe_access,
                                                       bool has_wide_vector,
                                                       JVMCIObject compiled_code,
                                                       JVMCIObject nmethod_mirror,
                                                       FailedSpeculation** failed_speculations,
                                                       char* speculations,
                                                       int speculations_len) {
  JVMCI_EXCEPTION_CONTEXT;
  nm = NULL;
  int comp_level = CompLevel_full_optimization;
  char* failure_detail = NULL;

  bool install_default = JVMCIENV->get_HotSpotNmethod_isDefault(nmethod_mirror) != 0;
  assert(JVMCIENV->isa_HotSpotNmethod(nmethod_mirror), "must be");
  JVMCIObject name = JVMCIENV->get_InstalledCode_name(nmethod_mirror);
  const char* nmethod_mirror_name = name.is_null() ? NULL : JVMCIENV->as_utf8_string(name);
  int nmethod_mirror_index;
  if (!install_default) {
    // Reserve or initialize mirror slot in the oops table.
    OopRecorder* oop_recorder = debug_info->oop_recorder();
    nmethod_mirror_index = oop_recorder->allocate_oop_index(nmethod_mirror.is_hotspot() ? nmethod_mirror.as_jobject() : NULL);
  } else {
    // A default HotSpotNmethod mirror is never tracked by the nmethod
    nmethod_mirror_index = -1;
  }

  JVMCI::CodeInstallResult result;
  {
    // To prevent compile queue updates.
    MutexLocker locker(MethodCompileQueue_lock, THREAD);

    // Prevent SystemDictionary::add_to_hierarchy from running
    // and invalidating our dependencies until we install this method.
    MutexLocker ml(Compile_lock);

    // Encode the dependencies now, so we can check them right away.
    dependencies->encode_content_bytes();

    // Record the dependencies for the current compile in the log
    if (LogCompilation) {
      for (Dependencies::DepStream deps(dependencies); deps.next(); ) {
        deps.log_dependency();
      }
    }

    // Check for {class loads, evolution, breakpoints} during compilation
    result = validate_compile_task_dependencies(dependencies, JVMCIENV->compile_state(), &failure_detail);
    if (result != JVMCI::ok) {
      // While not a true deoptimization, it is a preemptive decompile.
      MethodData* mdp = method()->method_data();
      if (mdp != NULL) {
        mdp->inc_decompile_count();
#ifdef ASSERT
        if (mdp->decompile_count() > (uint)PerMethodRecompilationCutoff) {
          ResourceMark m;
          tty->print_cr("WARN: endless recompilation of %s. Method was set to not compilable.", method()->name_and_sig_as_C_string());
        }
#endif
      }

      // All buffers in the CodeBuffer are allocated in the CodeCache.
      // If the code buffer is created on each compile attempt
      // as in C2, then it must be freed.
      //code_buffer->free_blob();
    } else {
      nm = nmethod::new_nmethod(method,
                                compile_id,
                                entry_bci,
                                offsets,
                                orig_pc_offset,
                                debug_info, dependencies, code_buffer,
                                frame_words, oop_map_set,
                                handler_table, implicit_exception_table,
                                compiler, comp_level,
                                speculations, speculations_len,
                                nmethod_mirror_index, nmethod_mirror_name, failed_speculations);


      // Free codeBlobs
      if (nm == NULL) {
        // The CodeCache is full. Print out warning and disable compilation.
        {
          MutexUnlocker ml(Compile_lock);
          MutexUnlocker locker(MethodCompileQueue_lock);
          CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
        }
      } else {
        nm->set_has_unsafe_access(has_unsafe_access);
        nm->set_has_wide_vectors(has_wide_vector);

        // Record successful registration.
        // (Put nm into the task handle *before* publishing to the Java heap.)
        if (JVMCIENV->compile_state() != NULL) {
          JVMCIENV->compile_state()->task()->set_code(nm);
        }

        JVMCINMethodData* data = nm->jvmci_nmethod_data();
        assert(data != NULL, "must be");
        if (install_default) {
          assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == NULL, "must be");
          if (entry_bci == InvocationEntryBci) {
            if (TieredCompilation) {
              // If there is an old version we're done with it
              CompiledMethod* old = method->code();
              if (TraceMethodReplacement && old != NULL) {
                ResourceMark rm;
                char *method_name = method->name_and_sig_as_C_string();
                tty->print_cr("Replacing method %s", method_name);
              }
              if (old != NULL) {
                old->make_not_entrant();
              }
            }
            if (TraceNMethodInstalls) {
              ResourceMark rm;
              char *method_name = method->name_and_sig_as_C_string();
              ttyLocker ttyl;
              tty->print_cr("Installing method (%d) %s [entry point: %p]",
                            comp_level,
                            method_name, nm->entry_point());
            }
            // Allow the code to be executed
            method->set_code(method, nm);
          } else {
            if (TraceNMethodInstalls) {
              ResourceMark rm;
              char *method_name = method->name_and_sig_as_C_string();
              ttyLocker ttyl;
              tty->print_cr("Installing osr method (%d) %s @ %d",
                            comp_level,
                            method_name,
                            entry_bci);
            }
            InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
          }
        } else {
          assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == HotSpotJVMCI::resolve(nmethod_mirror), "must be");
        }
        nm->make_in_use();
      }
      result = nm != NULL ? JVMCI::ok : JVMCI::cache_full;
    }
  }

  // String creation must be done outside lock
  if (failure_detail != NULL) {
    // A failure to allocate the string is silently ignored.
    JVMCIObject message = JVMCIENV->create_string(failure_detail, JVMCIENV);
    JVMCIENV->set_HotSpotCompiledNmethod_installationFailureMessage(compiled_code, message);
  }

  // JVMTI -- compiled method notification (must be done outside lock)
  if (nm != NULL) {
    nm->post_compiled_method_load_event();
  }

  return result;
}