1 /*
   2  * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "jvm.h"
  26 #include "asm/codeBuffer.hpp"
  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/compiledMethod.inline.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "jvmci/jvmciRuntime.hpp"
  34 #include "jvmci/jvmciCompilerToVM.hpp"
  35 #include "jvmci/jvmciCompiler.hpp"
  36 #include "jvmci/jvmciJavaClasses.hpp"
  37 #include "jvmci/jvmciEnv.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/oopFactory.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "oops/constantPool.inline.hpp"
  43 #include "oops/cpCache.inline.hpp"
  44 #include "oops/instanceMirrorKlass.hpp"
  45 #include "oops/method.inline.hpp"
  46 #include "oops/methodData.hpp"
  47 #include "oops/objArrayOop.inline.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/biasedLocking.hpp"
  50 #include "runtime/fieldDescriptor.inline.hpp"
  51 #include "runtime/frame.inline.hpp"
  52 #include "runtime/handles.inline.hpp"
  53 #include "runtime/interfaceSupport.inline.hpp"
  54 #include "runtime/jniHandles.inline.hpp"
  55 #include "runtime/reflection.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/sweeper.hpp"
  58 #include "runtime/threadSMR.hpp"
  59 #include "utilities/debug.hpp"
  60 #include "utilities/defaultStream.hpp"
  61 #include "utilities/macros.hpp"
  62 #if INCLUDE_G1GC
  63 #include "gc/g1/g1ThreadLocalData.hpp"
  64 #endif // INCLUDE_G1GC
  65 
  66 #if defined(_MSC_VER)
  67 #define strtoll _strtoi64
  68 #endif
  69 
  70 #ifdef ASSERT
  71 #define METADATA_TRACK_NAMES
  72 #endif
  73 
  74 struct _jmetadata {
  75  private:
  76   Metadata* _handle;
  77 #ifdef METADATA_TRACK_NAMES
  78   // Debug data for tracking stale metadata
  79   const char* _name;
  80 #endif
  81 
  82  public:
  83   Metadata* handle() { return _handle; }
  84 
  85 #ifdef METADATA_TRACK_NAMES
  86   void initialize() {
  87     _handle = NULL;
  88     _name = NULL;
  89   }
  90 #endif
  91 
  92   void set_handle(Metadata* value) {
  93     _handle = value;
  94   }
  95 
  96 #ifdef METADATA_TRACK_NAMES
  97   const char* name() { return _name; }
  98   void set_name(const char* name) {
  99     if (_name != NULL) {
 100       os::free((void*) _name);
 101       _name = NULL;
 102     }
 103     if (name != NULL) {
 104       _name = os::strdup(name);
 105     }
 106   }
 107 #endif
 108 };
 109 typedef struct _jmetadata HandleRecord;
 110 
 111 // JVMCI maintains direct references to metadata. To make these references safe in the face of
 112 // class redefinition, they are held in handles so they can be scanned during GC. They are
 113 // managed in a cooperative way between the Java code and HotSpot. A handle is filled in and
 114 // passed back to the Java code which is responsible for setting the handle to NULL when it
 115 // is no longer in use. This is done by jdk.vm.ci.hotspot.HandleCleaner. The
 116 // rebuild_free_list function notices when the handle is clear and reclaims it for re-use.
 117 class MetadataHandleBlock : public CHeapObj<mtInternal> {
 118  private:
 119   enum SomeConstants {
 120     block_size_in_handles  = 32,      // Number of handles per handle block
 121     ptr_tag = 1,
 122     ptr_mask = ~((intptr_t)ptr_tag)
 123   };
 124 
 125   // Free handles always have their low bit set so those pointers can
 126   // be distinguished from handles which are in use.  The last handle
 127   // on the free list has a NULL pointer with the tag bit set, so it's
 128   // clear that the handle has been reclaimed.  The _free_list is
 129   // always a real pointer to a handle.
 130 
 131   HandleRecord    _handles[block_size_in_handles]; // The handles
 132   int             _top;                         // Index of next unused handle
 133   MetadataHandleBlock* _next;                   // Link to next block
 134 
 135   // The following instance variables are only used by the first block in a chain.
 136   // Having two types of blocks complicates the code and the space overhead is negligible.
 137   MetadataHandleBlock* _last;                   // Last block in use
 138   intptr_t        _free_list;                   // Handle free list
 139   int             _allocate_before_rebuild;     // Number of blocks to allocate before rebuilding free list
 140 
 141   jmetadata allocate_metadata_handle(Metadata* metadata);
 142   void rebuild_free_list();
 143 
 144   MetadataHandleBlock() {
 145     _top = 0;
 146     _next = NULL;
 147     _last = this;
 148     _free_list = 0;
 149     _allocate_before_rebuild = 0;
 150 #ifdef METADATA_TRACK_NAMES
 151     for (int i = 0; i < block_size_in_handles; i++) {
 152       _handles[i].initialize();
 153     }
 154 #endif
 155   }
 156 
 157   const char* get_name(int index) {
 158 #ifdef METADATA_TRACK_NAMES
 159     return _handles[index].name();
 160 #else
 161     return "<missing>";
 162 #endif
 163   }
 164 
 165  public:
 166   jmetadata allocate_handle(methodHandle handle)       { return allocate_metadata_handle(handle()); }
 167   jmetadata allocate_handle(constantPoolHandle handle) { return allocate_metadata_handle(handle()); }
 168 
 169   static MetadataHandleBlock* allocate_block();
 170 
 171   // Adds `handle` to the free list in this block
 172   void chain_free_list(HandleRecord* handle) {
 173     handle->set_handle((Metadata*) (ptr_tag | _free_list));
 174 #ifdef METADATA_TRACK_NAMES
 175     handle->set_name(NULL);
 176 #endif
 177     _free_list = (intptr_t) handle;
 178   }
 179 
 180   HandleRecord* get_free_handle() {
 181     assert(_free_list != 0, "should check before calling");
 182     HandleRecord* handle = (HandleRecord*) (_free_list & ptr_mask);
 183     _free_list = (ptr_mask & (intptr_t) (handle->handle()));
 184     assert(_free_list != ptr_tag, "should be null");
 185     handle->set_handle(NULL);
 186     return handle;
 187   }
 188 
 189   void metadata_do(void f(Metadata*));
 190 
 191   void do_unloading(BoolObjectClosure* is_alive);
 192 };
 193 
 194 
 195 jmetadata MetadataHandleBlock::allocate_metadata_handle(Metadata* obj) {
 196   assert(obj->is_valid() && obj->is_metadata(), "must be");
 197 
 198   // Try last block
 199   HandleRecord* handle = NULL;
 200   if (_last->_top < block_size_in_handles) {
 201     handle = &(_last->_handles)[_last->_top++];
 202   } else if (_free_list != 0) {
 203     // Try free list
 204     handle = get_free_handle();
 205   }
 206 
 207   if (handle != NULL) {
 208     handle->set_handle(obj);
 209 #ifdef METADATA_TRACK_NAMES
 210     handle->set_name(obj->print_value_string());
 211 #endif
 212     return (jmetadata) handle;
 213   }
 214 
 215   // Check if unused block follow last
 216   if (_last->_next != NULL) {
 217     // update last and retry
 218     _last = _last->_next;
 219     return allocate_metadata_handle(obj);
 220   }
 221 
 222   // No space available, we have to rebuild free list or expand
 223   if (_allocate_before_rebuild == 0) {
 224     rebuild_free_list();        // updates _allocate_before_rebuild counter
 225   } else {
 226     // Append new block
 227     // This can block, but the caller has a metadata handle around this object.
 228     _last->_next = allocate_block();
 229     _last = _last->_next;
 230     _allocate_before_rebuild--;
 231   }
 232   return allocate_metadata_handle(obj);  // retry
 233 }
 234 
 235 
 236 void MetadataHandleBlock::rebuild_free_list() {
 237   assert(_allocate_before_rebuild == 0 && _free_list == 0, "just checking");
 238   int free = 0;
 239   int blocks = 0;
 240   for (MetadataHandleBlock* current = this; current != NULL; current = current->_next) {
 241     for (int index = 0; index < current->_top; index++) {
 242       HandleRecord* handle = &(current->_handles)[index];
 243       if (handle->handle() == NULL) {
 244         // this handle was cleared out by a delete call, reuse it
 245         chain_free_list(handle);
 246         free++;
 247       }
 248     }
 249     // we should not rebuild free list if there are unused handles at the end
 250     assert(current->_top == block_size_in_handles, "just checking");
 251     blocks++;
 252   }
 253   // Heuristic: if more than half of the handles are free we rebuild next time
 254   // as well, otherwise we append a corresponding number of new blocks before
 255   // attempting a free list rebuild again.
 256   int total = blocks * block_size_in_handles;
 257   int extra = total - 2*free;
 258   if (extra > 0) {
 259     // Not as many free handles as we would like - compute number of new blocks to append
 260     _allocate_before_rebuild = (extra + block_size_in_handles - 1) / block_size_in_handles;
 261   }
 262 }
 263 
// Allocates a fresh, empty block on the C heap (CHeapObj<mtInternal>).
MetadataHandleBlock* MetadataHandleBlock::allocate_block() {
  return new MetadataHandleBlock();
}
 267 
 268 void MetadataHandleBlock::metadata_do(void f(Metadata*)) {
 269   for (MetadataHandleBlock* current = this; current != NULL; current = current->_next) {
 270     for (int index = 0; index < current->_top; index++) {
 271       HandleRecord* root = &(current->_handles)[index];
 272       Metadata* value = root->handle();
 273       // traverse heap pointers only, not deleted handles or free list
 274       // pointers
 275       if (value != NULL && ((intptr_t) value & ptr_tag) == 0) {
 276         assert(value->is_valid(), "invalid metadata %s", get_name(index));
 277         f(value);
 278       }
 279     }
 280     // the next handle block is valid only if current block is full
 281     if (current->_top < block_size_in_handles) {
 282       break;
 283     }
 284   }
 285 }
 286 
 287 // Visit any live metadata handles and clean them up.  Since clearing of these handles is driven by
 288 // weak references they will be cleared at some point in the future when the reference cleaning logic is run.
void MetadataHandleBlock::do_unloading(BoolObjectClosure* is_alive) {
  for (MetadataHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      HandleRecord* handle = &(current->_handles)[index];
      Metadata* value = handle->handle();
      // traverse heap pointers only, not deleted handles or free list
      // pointers
      if (value != NULL && ((intptr_t) value & ptr_tag) == 0) {
        // Determine the class whose loader keeps this metadata alive.
        Klass* klass = NULL;
        if (value->is_klass()) {
          klass = (Klass*)value;
        } else if (value->is_method()) {
          Method* m = (Method*)value;
          klass = m->method_holder();
        } else if (value->is_constantPool()) {
          ConstantPool* cp = (ConstantPool*)value;
          klass = cp->pool_holder();
        } else {
          ShouldNotReachHere();
        }
        if (klass->class_loader_data()->is_unloading()) {
          // This needs to be marked so that it's no longer scanned
          // but can't be put on the free list yet. The
          // HandleCleaner will set this to NULL and
          // put it on the free list.
          // NOTE(review): the (jlong*) cast relies on _handle being the first
          // field of HandleRecord; the only expected race is a concurrent
          // clear to NULL by the HandleCleaner, hence the guarantee below.
          jlong old_value = Atomic::cmpxchg((jlong) (ptr_tag), (jlong*)handle, (jlong) value);
          if (old_value == (jlong) value) {
            // Success: slot now holds the bare tag and will be skipped by
            // subsequent scans.
          } else {
            guarantee(old_value == 0, "only other possible value");
          }
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_handles) {
      break;
    }
  }
}
 329 
// Definitions of the JVMCI class's static state (declared in the JVMCI
// header); all are set up during JVMCI initialization elsewhere.
JNIHandleBlock* JVMCI::_object_handles = NULL;
MetadataHandleBlock* JVMCI::_metadata_handles = NULL;
JVMCIRuntime* JVMCI::_compiler_runtime = NULL;
JVMCIRuntime* JVMCI::_java_runtime = NULL;
 334 
 335 // Simple helper to see if the caller of a runtime stub which
 336 // entered the VM has been deoptimized
 337 
 338 static bool caller_is_deopted() {
 339   JavaThread* thread = JavaThread::current();
 340   RegisterMap reg_map(thread, false);
 341   frame runtime_frame = thread->last_frame();
 342   frame caller_frame = runtime_frame.sender(&reg_map);
 343   assert(caller_frame.is_compiled_frame(), "must be compiled");
 344   return caller_frame.is_deoptimized_frame();
 345 }
 346 
 347 // Stress deoptimization
 348 static void deopt_caller() {
 349   if ( !caller_is_deopted()) {
 350     JavaThread* thread = JavaThread::current();
 351     RegisterMap reg_map(thread, false);
 352     frame runtime_frame = thread->last_frame();
 353     frame caller_frame = runtime_frame.sender(&reg_map);
 354     Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
 355     assert(caller_is_deopted(), "Must be deoptimized");
 356   }
 357 }
 358 
 359 // Manages a scope for a JVMCI runtime call that attempts a heap allocation.
 360 // If there is a pending exception upon closing the scope and the runtime
 361 // call is of the variety where allocation failure returns NULL without an
 362 // exception, the following action is taken:
 363 //   1. The pending exception is cleared
 364 //   2. NULL is written to JavaThread::_vm_result
 365 //   3. Checks that an OutOfMemoryError is Universe::out_of_memory_error_retry().
 366 class RetryableAllocationMark: public StackObj {
 367  private:
 368   JavaThread* _thread;
 369  public:
 370   RetryableAllocationMark(JavaThread* thread, bool activate) {
 371     if (activate) {
 372       assert(!thread->in_retryable_allocation(), "retryable allocation scope is non-reentrant");
 373       _thread = thread;
 374       _thread->set_in_retryable_allocation(true);
 375     } else {
 376       _thread = NULL;
 377     }
 378   }
 379   ~RetryableAllocationMark() {
 380     if (_thread != NULL) {
 381       _thread->set_in_retryable_allocation(false);
 382       JavaThread* THREAD = _thread;
 383       if (HAS_PENDING_EXCEPTION) {
 384         oop ex = PENDING_EXCEPTION;
 385         CLEAR_PENDING_EXCEPTION;
 386         oop retry_oome = Universe::out_of_memory_error_retry();
 387         if (ex->is_a(retry_oome->klass()) && retry_oome != ex) {
 388           ResourceMark rm;
 389           fatal("Unexpected exception in scope of retryable allocation: " INTPTR_FORMAT " of type %s", p2i(ex), ex->klass()->external_name());
 390         }
 391         _thread->set_vm_result(NULL);
 392       }
 393     }
 394   }
 395 };
 396 
// Allocates a new instance of `klass`, returning it via
// JavaThread::_vm_result. When `null_on_fail` is true, allocation failure
// yields a NULL _vm_result instead of a pending OutOfMemoryError (see
// RetryableAllocationMark above).
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance_common(JavaThread* thread, Klass* klass, bool null_on_fail))
  JRT_BLOCK;
  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  {
    RetryableAllocationMark ram(thread, null_on_fail);
    h->check_valid_for_instantiation(true, CHECK);
    oop obj;
    if (null_on_fail) {
      if (!h->is_initialized()) {
        // Cannot re-execute class initialization without side effects
        // so return without attempting the initialization
        return;
      }
    } else {
      // make sure klass is initialized
      h->initialize(CHECK);
    }
    // allocate instance and return via TLS
    obj = h->allocate_instance(CHECK);
    thread->set_vm_result(obj);
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
 423 
// Allocates a one-dimensional type or object array of `array_klass` with
// `length` elements, returning it via JavaThread::_vm_result. When
// `null_on_fail` is true, allocation failure yields a NULL _vm_result
// instead of a pending OutOfMemoryError (see RetryableAllocationMark above).
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array_common(JavaThread* thread, Klass* array_klass, jint length, bool null_on_fail))
  JRT_BLOCK;
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  oop obj;
  if (array_klass->is_typeArray_klass()) {
    BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
    RetryableAllocationMark ram(thread, null_on_fail);
    obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  } else {
    Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
    Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
    RetryableAllocationMark ram(thread, null_on_fail);
    obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  }
  thread->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    static int deopts = 0;
    // Alternate between deoptimizing and raising an error (which will also cause a deopt)
    if (deopts++ % 2 == 0) {
      if (null_on_fail) {
        // In the retryable case the deopt is forced via the NULL result.
        return;
      } else {
        ResourceMark rm(THREAD);
        THROW(vmSymbols::java_lang_OutOfMemoryError());
      }
    } else {
      deopt_caller();
    }
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
 461 
// Allocates a multi-dimensional array of `klass` with `rank` dimensions
// whose sizes are given by `dims`, returning it via JavaThread::_vm_result.
// `null_on_fail` selects retryable-allocation semantics (see
// RetryableAllocationMark above).
JRT_ENTRY(void, JVMCIRuntime::new_multi_array_common(JavaThread* thread, Klass* klass, int rank, jint* dims, bool null_on_fail))
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  RetryableAllocationMark ram(thread, null_on_fail);
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END
 470 
// Allocates an array whose element type is described by `element_mirror`
// (a java.lang.Class oop), returning it via JavaThread::_vm_result.
// `null_on_fail` selects retryable-allocation semantics (see
// RetryableAllocationMark above).
JRT_ENTRY(void, JVMCIRuntime::dynamic_new_array_common(JavaThread* thread, oopDesc* element_mirror, jint length, bool null_on_fail))
  RetryableAllocationMark ram(thread, null_on_fail);
  oop obj = Reflection::reflect_new_array(element_mirror, length, CHECK);
  thread->set_vm_result(obj);
JRT_END
 476 
 477 JRT_ENTRY(void, JVMCIRuntime::dynamic_new_instance_common(JavaThread* thread, oopDesc* type_mirror, bool null_on_fail))
 478   InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(type_mirror));
 479 
 480   if (klass == NULL) {
 481     ResourceMark rm(THREAD);
 482     THROW(vmSymbols::java_lang_InstantiationException());
 483   }
 484   RetryableAllocationMark ram(thread, null_on_fail);
 485 
 486   // Create new instance (the receiver)
 487   klass->check_valid_for_instantiation(false, CHECK);
 488 
 489   if (null_on_fail) {
 490     if (!klass->is_initialized()) {
 491       // Cannot re-execute class initialization without side effects
 492       // so return without attempting the initialization
 493       return;
 494     }
 495   } else {
 496     // Make sure klass gets initialized
 497     klass->initialize(CHECK);
 498   }
 499 
 500   oop obj = klass->allocate_instance(CHECK);
 501   thread->set_vm_result(obj);
 502 JRT_END
 503 
 504 extern void vm_exit(int code);
 505 
 506 // Enter this method from compiled code handler below. This is where we transition
 507 // to VM mode. This is done as a helper routine so that the method called directly
 508 // from compiled code does not have to transition to VM. This allows the entry
 509 // method to see if the nmethod that we have just looked up a handler for has
 510 // been deoptimized while we were in the vm. This simplifies the assembly code
 511 // cpu directories.
 512 //
 513 // We are entering here from exception stub (via the entry method below)
 514 // If there is a compiled exception handler in this method, we will continue there;
 515 // otherwise we will unwind the stack and continue at the caller of top frame method
 516 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
 517 // control the area where we can allow a safepoint. After we exit the safepoint area we can
 518 // check to see if the handler we are going to return is now in a nmethod that has
 519 // been deoptimized. If that is the case we return the deopt blob
 520 // unpack_with_exception entry instead. This makes life for the exception blob easier
 521 // because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, CompiledMethod*& cm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  // Out-param: the caller checks cm for deoptimization after we return.
  cm = CodeCache::find_compiled(pc);
  assert(cm != NULL, "this is not a compiled method");
  // Adjust the pc as needed.
  if (cm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(oopDesc::is_oop(exception()), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_guards_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      stringStream tempst;
      assert(cm->method() != NULL, "Unexpected null method()");
      tempst.print("compiled method <%s>\n"
                   " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                   cm->method()->print_value_string(), p2i(pc), p2i(thread));
      Exceptions::log_exception(exception, tempst);
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions
    // Update the exception cache only when another exception did
    // occur during the computation of the compiled exception handler
    // (e.g., when loading the class of the catch type).
    // Checking for exception oop equality is not
    // sufficient because some exceptions are pre-allocated and reused.
    if (continuation != NULL && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
      cm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(cm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(thread), p2i(continuation), p2i(pc));
  }

  // NULL continuation means unwind: no handler in this method.
  return continuation;
JRT_END
 643 
 644 // Enter this method from compiled code only if there is a Java exception handler
 645 // in the method handling the exception.
 646 // We are entering here from exception stub. We don't do a normal VM transition here.
 647 // We do it in a helper. This is so we can check to see if the nmethod we have just
 648 // searched for an exception handler has been deoptimized in the meantime.
address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
  // The pending exception and throwing pc were stashed in the thread by
  // the exception stub.
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  // cm is filled in by the helper with the compiled method containing pc.
  CompiledMethod* cm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, cm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the compiled method we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (cm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}
 672 
// Slow-path monitor enter called from JVMCI-compiled code; may block.
// Dispatches on UseBiasedLocking/JVMCIUseFastLocking to the appropriate
// ObjectSynchronizer entry point.
JRT_ENTRY_NO_ASYNC(void, JVMCIRuntime::monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  IF_TRACE_jvmci_3 {
    char type[O_BUFLEN];
    obj->klass()->name()->as_C_string(type, O_BUFLEN);
    markOop mark = obj->mark();
    TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(mark), p2i(lock));
    tty->flush();
  }
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  assert(oopDesc::is_oop(h_obj()), "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  } else {
    if (JVMCIUseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
    } else {
      ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
    }
  }
  TRACE_jvmci_3("%s: exiting locking slow with obj=" INTPTR_FORMAT, thread->name(), p2i(obj));
JRT_END
 699 
 700 JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
 701   assert(thread == JavaThread::current(), "threads must correspond");
 702   assert(thread->last_Java_sp(), "last_Java_sp must be set");
 703   // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
 704   EXCEPTION_MARK;
 705 
 706 #ifdef ASSERT
 707   if (!oopDesc::is_oop(obj)) {
 708     ResetNoHandleMark rhm;
 709     nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
 710     if (method != NULL) {
 711       tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), p2i(obj));
 712     }
 713     thread->print_stack_on(tty);
 714     assert(false, "invalid lock object pointer dected");
 715   }
 716 #endif
 717 
 718   if (JVMCIUseFastLocking) {
 719     // When using fast locking, the compiled code has already tried the fast case
 720     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
 721   } else {
 722     ObjectSynchronizer::fast_exit(obj, lock, THREAD);
 723   }
 724   IF_TRACE_jvmci_3 {
 725     char type[O_BUFLEN];
 726     obj->klass()->name()->as_C_string(type, O_BUFLEN);
 727     TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(obj->mark()), p2i(lock));
 728     tty->flush();
 729   }
 730 JRT_END
 731 
 732 // Object.notify() fast path, caller does slow path
 733 JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread *thread, oopDesc* obj))
 734 
 735   // Very few notify/notifyAll operations find any threads on the waitset, so
 736   // the dominant fast-path is to simply return.
 737   // Relatedly, it's critical that notify/notifyAll be fast in order to
 738   // reduce lock hold times.
 739   if (!SafepointSynchronize::is_synchronizing()) {
 740     if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
 741       return true;
 742     }
 743   }
 744   return false; // caller must perform slow path
 745 
 746 JRT_END
 747 
 748 // Object.notifyAll() fast path, caller does slow path
 749 JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread *thread, oopDesc* obj))
 750 
 751   if (!SafepointSynchronize::is_synchronizing() ) {
 752     if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
 753       return true;
 754     }
 755   }
 756   return false; // caller must perform slow path
 757 
 758 JRT_END
 759 
// Throws an exception of the named type with the given detail message and
// posts the JVMTI exception event. CHECK propagates any exception raised
// while creating the symbol itself.
JRT_ENTRY(void, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message))
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK);
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
JRT_END
 764 
// Throws an exception of the named type whose message is the external name of
// klass. The ResourceMark bounds the lifetime of the external_name() buffer.
JRT_ENTRY(void, JVMCIRuntime::throw_klass_external_name_exception(JavaThread* thread, const char* exception, Klass* klass))
  ResourceMark rm(thread);
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK);
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, klass->external_name());
JRT_END
 770 
// Throws an exception of the named type with a generated "X cannot be cast to
// Y" message for the given caster/target klass pair.
JRT_ENTRY(void, JVMCIRuntime::throw_class_cast_exception(JavaThread* thread, const char* exception, Klass* caster_klass, Klass* target_klass))
  ResourceMark rm(thread);
  const char* message = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
  TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK);
  SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
JRT_END
 777 
 778 JRT_LEAF(void, JVMCIRuntime::log_object(JavaThread* thread, oopDesc* obj, bool as_string, bool newline))
 779   ttyLocker ttyl;
 780 
 781   if (obj == NULL) {
 782     tty->print("NULL");
 783   } else if (oopDesc::is_oop_or_null(obj, true) && (!as_string || !java_lang_String::is_instance(obj))) {
 784     if (oopDesc::is_oop_or_null(obj, true)) {
 785       char buf[O_BUFLEN];
 786       tty->print("%s@" INTPTR_FORMAT, obj->klass()->name()->as_C_string(buf, O_BUFLEN), p2i(obj));
 787     } else {
 788       tty->print(INTPTR_FORMAT, p2i(obj));
 789     }
 790   } else {
 791     ResourceMark rm;
 792     assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
 793     char *buf = java_lang_String::as_utf8_string(obj);
 794     tty->print_raw(buf);
 795   }
 796   if (newline) {
 797     tty->cr();
 798   }
 799 JRT_END
 800 
 801 #if INCLUDE_G1GC
 802 
// G1 pre-write barrier slow path: enqueues obj on the current thread's
// SATB mark queue.
JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
  G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
JRT_END
 806 
// G1 post-write barrier slow path: enqueues the card address on the current
// thread's dirty card queue.
JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
  G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
JRT_END
 810 
 811 #endif // INCLUDE_G1GC
 812 
 813 JRT_LEAF(jboolean, JVMCIRuntime::validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child))
 814   bool ret = true;
 815   if(!Universe::heap()->is_in_closed_subset(parent)) {
 816     tty->print_cr("Parent Object " INTPTR_FORMAT " not in heap", p2i(parent));
 817     parent->print();
 818     ret=false;
 819   }
 820   if(!Universe::heap()->is_in_closed_subset(child)) {
 821     tty->print_cr("Child Object " INTPTR_FORMAT " not in heap", p2i(child));
 822     child->print();
 823     ret=false;
 824   }
 825   return (jint)ret;
 826 JRT_END
 827 
// Reports a VM error on behalf of JVMCI code. where and format arrive as raw
// jlongs holding C string addresses (0 means absent); value is a single
// argument substituted into the format string.
JRT_ENTRY(void, JVMCIRuntime::vm_error(JavaThread* thread, jlong where, jlong format, jlong value))
  ResourceMark rm;
  const char *error_msg = where == 0L ? "<internal JVMCI error>" : (char*) (address) where;
  char *detail_msg = NULL;
  if (format != 0L) {
    const char* buf = (char*) (address) format;
    // NOTE(review): sizing the buffer at 2x the format length assumes the
    // formatted value does not expand the message beyond that bound;
    // jio_snprintf truncates otherwise — confirm this is acceptable.
    size_t detail_msg_length = strlen(buf) * 2;
    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
    jio_snprintf(detail_msg, detail_msg_length, buf, value);
  }
  // detail_msg may be NULL when no format was supplied.
  report_vm_error(__FILE__, __LINE__, error_msg, "%s", detail_msg);
JRT_END
 840 
// Returns the pending exception oop stashed on the thread and clears both the
// exception oop and pc fields. Used by exception handling in compiled code.
JRT_LEAF(oopDesc*, JVMCIRuntime::load_and_clear_exception(JavaThread* thread))
  oop exception = thread->exception_oop();
  assert(exception != NULL, "npe");
  thread->set_exception_oop(NULL);
  thread->set_exception_pc(0);
  return exception;
JRT_END
 848 
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
// printf-style logging entry point for compiled code: formats up to three
// jlong arguments with a caller-supplied format string. The pragmas suppress
// the non-literal-format warning, since format comes from the caller.
JRT_LEAF(void, JVMCIRuntime::log_printf(JavaThread* thread, const char* format, jlong v1, jlong v2, jlong v3))
  ResourceMark rm;
  tty->print(format, v1, v2, v3);
JRT_END
PRAGMA_DIAG_POP
 856 
 857 static void decipher(jlong v, bool ignoreZero) {
 858   if (v != 0 || !ignoreZero) {
 859     void* p = (void *)(address) v;
 860     CodeBlob* cb = CodeCache::find_blob(p);
 861     if (cb) {
 862       if (cb->is_nmethod()) {
 863         char buf[O_BUFLEN];
 864         tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod_or_null()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin()));
 865         return;
 866       }
 867       cb->print_value_on(tty);
 868       return;
 869     }
 870     if (Universe::heap()->is_in(p)) {
 871       oop obj = oop(p);
 872       obj->print_value_on(tty);
 873       return;
 874     }
 875     tty->print(INTPTR_FORMAT " [long: " JLONG_FORMAT ", double %lf, char %c]",p2i((void *)v), (jlong)v, (jdouble)v, (char)v);
 876   }
 877 }
 878 
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
// Message/error reporting entry point for compiled code. format is a raw
// jlong holding a C string address (may be 0). With vmError set the VM is
// brought down via fatal(); otherwise the message is printed, or — when no
// format is supplied — v1 is deciphered as an address/value.
JRT_LEAF(void, JVMCIRuntime::vm_message(jboolean vmError, jlong format, jlong v1, jlong v2, jlong v3))
  ResourceMark rm;
  const char *buf = (const char*) (address) format;
  if (vmError) {
    if (buf != NULL) {
      fatal(buf, v1, v2, v3);
    } else {
      fatal("<anonymous error>");
    }
  } else if (buf != NULL) {
    tty->print(buf, v1, v2, v3);
  } else {
    // No format string: v1 carries the single value to decode.
    assert(v2 == 0, "v2 != 0");
    assert(v3 == 0, "v3 != 0");
    decipher(v1, false);
  }
JRT_END
PRAGMA_DIAG_POP
 899 
 900 JRT_LEAF(void, JVMCIRuntime::log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
 901   union {
 902       jlong l;
 903       jdouble d;
 904       jfloat f;
 905   } uu;
 906   uu.l = value;
 907   switch (typeChar) {
 908     case 'Z': tty->print(value == 0 ? "false" : "true"); break;
 909     case 'B': tty->print("%d", (jbyte) value); break;
 910     case 'C': tty->print("%c", (jchar) value); break;
 911     case 'S': tty->print("%d", (jshort) value); break;
 912     case 'I': tty->print("%d", (jint) value); break;
 913     case 'F': tty->print("%f", uu.f); break;
 914     case 'J': tty->print(JLONG_FORMAT, value); break;
 915     case 'D': tty->print("%lf", uu.d); break;
 916     default: assert(false, "unknown typeChar"); break;
 917   }
 918   if (newline) {
 919     tty->cr();
 920   }
 921 JRT_END
 922 
// Computes (and installs if necessary) the identity hash code of obj.
// JRT_ENTRY because identity_hash may need to inflate the monitor.
JRT_ENTRY(jint, JVMCIRuntime::identity_hash_code(JavaThread* thread, oopDesc* obj))
  return (jint) obj->identity_hash();
JRT_END
 926 
 927 JRT_ENTRY(jboolean, JVMCIRuntime::thread_is_interrupted(JavaThread* thread, oopDesc* receiver, jboolean clear_interrupted))
 928   Handle receiverHandle(thread, receiver);
 929   // A nested ThreadsListHandle may require the Threads_lock which
 930   // requires thread_in_vm which is why this method cannot be JRT_LEAF.
 931   ThreadsListHandle tlh;
 932 
 933   JavaThread* receiverThread = java_lang_Thread::thread(receiverHandle());
 934   if (receiverThread == NULL || (EnableThreadSMRExtraValidityChecks && !tlh.includes(receiverThread))) {
 935     // The other thread may exit during this process, which is ok so return false.
 936     return JNI_FALSE;
 937   } else {
 938     return (jint) Thread::is_interrupted(receiverThread, clear_interrupted != 0);
 939   }
 940 JRT_END
 941 
// Test-only entry point: deoptimizes the calling compiled frame and echoes
// value back to the (now interpreted) caller.
JRT_ENTRY(jint, JVMCIRuntime::test_deoptimize_call_int(JavaThread* thread, int value))
  deopt_caller();
  return (jint) value;
JRT_END
 946 
 947 
// private static JVMCIRuntime JVMCI.initializeRuntime()
// Returns the HotSpotJVMCIRuntime object for the current JVMCIEnv, creating
// and initializing it on first use. Throws InternalError if -XX:+EnableJVMCI
// is not set.
JVM_ENTRY_NO_ENV(jobject, JVM_GetJVMCIRuntime(JNIEnv *env, jclass c))
  JNI_JVMCIENV(env);
  if (!EnableJVMCI) {
    JVMCI_THROW_MSG_NULL(InternalError, "JVMCI is not enabled");
  }
  JVMCIENV->runtime()->initialize_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
  JVMCIObject runtime = JVMCIENV->runtime()->get_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
  return JVMCIENV->get_jobject(runtime);
JVM_END
 958 
// Forces creation of the JVMCI compiler object by calling
// HotSpotJVMCIRuntime.getCompiler() on the Java side, initializing this
// runtime first if necessary.
void JVMCIRuntime::call_getCompiler(TRAPS) {
  THREAD_JVMCIENV(JavaThread::current());
  JVMCIObject jvmciRuntime = JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_CHECK);
  initialize(JVMCIENV);
  JVMCIENV->call_HotSpotJVMCIRuntime_getCompiler(jvmciRuntime, JVMCI_CHECK);
}
 965 
// Initializes this nmethod-attached JVMCI data record.
// nmethod_mirror_index: oops-section index of the HotSpotNmethod mirror (-1 if none)
// name: optional nmethod name copied into this record (NULL means unnamed)
// failed_speculations: list head shared with the compiler for failed speculations
void JVMCINMethodData::initialize(
  int nmethod_mirror_index,
  const char* name,
  FailedSpeculation** failed_speculations)
{
  _failed_speculations = failed_speculations;
  _nmethod_mirror_index = nmethod_mirror_index;
  if (name != NULL) {
    _has_name = true;
    // NOTE(review): assumes the buffer behind this->name() was allocated
    // large enough for strlen(name)+1 by whoever sized this record — confirm.
    char* dest = (char*) this->name();
    strcpy(dest, name);
  } else {
    _has_name = false;
  }
}
 981 
// Records a failed speculation. The jlong encodes an (index, length) pair:
// the high 32 bits index into nm's speculations section, the low 32 bits give
// the speculation's byte length.
void JVMCINMethodData::add_failed_speculation(nmethod* nm, jlong speculation) {
  uint index = (speculation >> 32) & 0xFFFFFFFF;
  int length = (int) speculation;
  // Bounds check against the encoded speculations blob before touching it.
  if (index + length > (uint) nm->speculations_size()) {
    fatal(INTPTR_FORMAT "[index: %d, length: %d] out of bounds wrt encoded speculations of length %u", speculation, index, length, nm->speculations_size());
  }
  address data = nm->speculations_begin() + index;
  FailedSpeculation::add_failed_speculation(nm, _failed_speculations, data, length);
}
 991 
 992 oop JVMCINMethodData::get_nmethod_mirror(nmethod* nm) {
 993   if (_nmethod_mirror_index == -1) {
 994     return NULL;
 995   }
 996   return nm->oop_at(_nmethod_mirror_index);
 997 }
 998 
// Installs the HotSpotNmethod mirror into nm's oops section. The slot must
// exist (index != -1) and must currently be empty; use clear_nmethod_mirror
// to reset it.
void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
  assert(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
  oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
  assert(new_mirror != NULL, "use clear_nmethod_mirror to clear the mirror");
  assert(*addr == NULL, "cannot overwrite non-null mirror");

  *addr = new_mirror;

  // Since we've patched some oops in the nmethod,
  // (re)register it with the heap.
  Universe::heap()->register_nmethod(nm);
}
1011 
1012 void JVMCINMethodData::clear_nmethod_mirror(nmethod* nm) {
1013   if (_nmethod_mirror_index != -1) {
1014     oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
1015     *addr = NULL;
1016   }
1017 }
1018 
// Severs or weakens the link from the HotSpotNmethod mirror to nm according
// to nm's life-cycle state. Safe to call during GC (uses no JNIHandles).
void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
  oop nmethod_mirror = get_nmethod_mirror(nm);
  if (nmethod_mirror == NULL) {
    return;
  }

  // Update the values in the mirror if it still refers to nm.
  // We cannot use JVMCIObject to wrap the mirror as this is called
  // during GC, forbidding the creation of JNIHandles.
  JVMCIEnv* jvmciEnv = NULL;
  nmethod* current = (nmethod*) HotSpotJVMCI::InstalledCode::address(jvmciEnv, nmethod_mirror);
  if (nm == current) {
    if (!nm->is_alive()) {
      // Break the link from the mirror to nm such that
      // future invocations via the mirror will result in
      // an InvalidInstalledCodeException.
      HotSpotJVMCI::InstalledCode::set_address(jvmciEnv, nmethod_mirror, 0);
      HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
    } else if (nm->is_not_entrant()) {
      // Zero the entry point so any new invocation will fail but keep
      // the address link around that so that existing activations can
      // be deoptimized via the mirror (i.e. JVMCIEnv::invalidate_installed_code).
      HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
    }
  }
}
1045 
// Creates the HotSpotJVMCIRuntime Java object for this runtime and caches a
// global reference to it, also recording the compilation level adjustment
// policy the Java side requests.
void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
  if (!_HotSpotJVMCIRuntime_instance.is_null()) {
    // Only one of the HotSpot heap and the shared-library heap may own the
    // runtime instance when the native JVMCI library is in use.
    if (JVMCIENV->is_hotspot() && UseJVMCINativeLibrary) {
      JVMCI_THROW_MSG(InternalError, "JVMCI has already been enabled in the JVMCI shared library");
    }
  }

  initialize(JVMCIENV);

  // This should only be called in the context of the JVMCI class being initialized
  JVMCIObject result = JVMCIENV->call_HotSpotJVMCIRuntime_runtime(JVMCI_CHECK);
  int adjustment = JVMCIENV->get_HotSpotJVMCIRuntime_compilationLevelAdjustment(result);
  assert(adjustment >= JVMCIRuntime::none &&
         adjustment <= JVMCIRuntime::by_full_signature,
         "compilation level adjustment out of bounds");
  _comp_level_adjustment = (CompLevelAdjustment) adjustment;

  _HotSpotJVMCIRuntime_instance = JVMCIENV->make_global(result);
}
1065 
1066 bool JVMCI::can_initialize_JVMCI() {
1067   // Initializing JVMCI requires the module system to be initialized past phase 3.
1068   // The JVMCI API itself isn't available until phase 2 and ServiceLoader (which
1069   // JVMCI initialization requires) isn't usable until after phase 3. Testing
1070   // whether the system loader is initialized satisfies all these invariants.
1071   if (SystemDictionary::java_system_loader() == NULL) {
1072     return false;
1073   }
1074   assert(Universe::is_module_initialized(), "must be");
1075   return true;
1076 }
1077 
// Triggers creation of the JVMCI compiler. With -XX:+JVMCILibDumpJNIConfig
// the VM only dumps the JNI config and exits inside initialize_ids.
void JVMCI::initialize_compiler(TRAPS) {
  if (JVMCILibDumpJNIConfig) {
    JNIJVMCI::initialize_ids(NULL);
    ShouldNotReachHere();
  }

  JVMCI::compiler_runtime()->call_getCompiler(CHECK);
}
1086 
1087 void JVMCI::initialize_globals() {
1088   _object_handles = JNIHandleBlock::allocate_block();
1089   _metadata_handles = MetadataHandleBlock::allocate_block();
1090   if (UseJVMCINativeLibrary) {
1091     // There are two runtimes.
1092     _compiler_runtime = new JVMCIRuntime();
1093     _java_runtime = new JVMCIRuntime();
1094   } else {
1095     // There is only a single runtime
1096     _java_runtime = _compiler_runtime = new JVMCIRuntime();
1097   }
1098 }
1099 
1100 
// One-time initialization of this runtime: computes Java class offsets (or
// JNI ids for the shared library), creates the primitive type mirrors and
// copies saved system properties. Thread-safe via double-checked locking on
// JVMCI_lock with a _being_initialized flag so concurrent callers wait.
void JVMCIRuntime::initialize(JVMCIEnv* JVMCIENV) {
  // NOTE(review): testing `this != NULL` is formally undefined behavior and
  // may be elided by the compiler — consider checking at the call sites.
  assert(this != NULL, "sanity");
  // Check first without JVMCI_lock
  if (_initialized) {
    return;
  }

  MutexLocker locker(JVMCI_lock);
  // Check again under JVMCI_lock
  if (_initialized) {
    return;
  }

  // Another thread is initializing: wait for it to finish.
  while (_being_initialized) {
    JVMCI_lock->wait();
    if (_initialized) {
      return;
    }
  }

  _being_initialized = true;

  {
    // Drop the lock for the heavy work so waiters can block on it.
    MutexUnlocker unlock(JVMCI_lock);

    HandleMark hm;
    ResourceMark rm;
    JavaThread* THREAD = JavaThread::current();
    if (JVMCIENV->is_hotspot()) {
      HotSpotJVMCI::compute_offsets(CHECK_EXIT);
    } else {
      JNIAccessMark jni(JVMCIENV);

      JNIJVMCI::initialize_ids(jni.env());
      if (jni()->ExceptionCheck()) {
        jni()->ExceptionDescribe();
        fatal("JNI exception during init");
      }
    }
    create_jvmci_primitive_type(T_BOOLEAN, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_BYTE, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_CHAR, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_SHORT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_INT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_LONG, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_FLOAT, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_DOUBLE, JVMCI_CHECK_EXIT_((void)0));
    create_jvmci_primitive_type(T_VOID, JVMCI_CHECK_EXIT_((void)0));

    if (!JVMCIENV->is_hotspot()) {
      JVMCIENV->copy_saved_properties();
    }
  }

  _initialized = true;
  _being_initialized = false;
  JVMCI_lock->notify_all();
}
1159 
// Creates the HotSpotResolvedPrimitiveType wrapper for the given BasicType by
// calling HotSpotResolvedPrimitiveType.fromMetaspace, either via a JavaCall
// (HotSpot heap) or via JNI (shared library heap). Returns a null JVMCIObject
// if the JNI call raised an exception.
JVMCIObject JVMCIRuntime::create_jvmci_primitive_type(BasicType type, JVMCI_TRAPS) {
  Thread* THREAD = Thread::current();
  // These primitive types are long lived and are created before the runtime is fully set up
  // so skip registering them for scanning.
  JVMCIObject mirror = JVMCIENV->get_object_constant(java_lang_Class::primitive_mirror(type), false, true);
  if (JVMCIENV->is_hotspot()) {
    JavaValue result(T_OBJECT);
    JavaCallArguments args;
    args.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(mirror)));
    args.push_int(type2char(type));
    JavaCalls::call_static(&result, HotSpotJVMCI::HotSpotResolvedPrimitiveType::klass(), vmSymbols::fromMetaspace_name(), vmSymbols::primitive_fromMetaspace_signature(), &args, CHECK_(JVMCIObject()));

    return JVMCIENV->wrap(JNIHandles::make_local((oop)result.get_jobject()));
  } else {
    JNIAccessMark jni(JVMCIENV);
    jobject result = jni()->CallStaticObjectMethod(JNIJVMCI::HotSpotResolvedPrimitiveType::clazz(),
                                           JNIJVMCI::HotSpotResolvedPrimitiveType_fromMetaspace_method(),
                                           mirror.as_jobject(), type2char(type));
    if (jni()->ExceptionCheck()) {
      return JVMCIObject();
    }
    return JVMCIENV->wrap(result);
  }
}
1184 
// Ensures the Java-side JVMCI runtime object exists, initializing this
// runtime and calling JVMCI.getRuntime() on first use.
void JVMCIRuntime::initialize_JVMCI(JVMCI_TRAPS) {
  if (_HotSpotJVMCIRuntime_instance.is_null()) {
    initialize(JVMCI_CHECK);
    JVMCIENV->call_JVMCI_getRuntime(JVMCI_CHECK);
  }
}
1191 
// Returns the cached HotSpotJVMCIRuntime instance, initializing the runtime
// and the Java side on first use.
JVMCIObject JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
  initialize(JVMCIENV);
  initialize_JVMCI(JVMCI_CHECK_(JVMCIObject()));
  return _HotSpotJVMCIRuntime_instance;
}
1197 
// Allocates a JVMCI global handle for obj. Serialized on JVMCI_lock since the
// handle block is shared.
jobject JVMCI::make_global(Handle obj) {
  assert(_object_handles != NULL, "uninitialized");
  MutexLocker ml(JVMCI_lock);
  return _object_handles->allocate_handle(obj());
}
1203 
// Returns true if handle was allocated from JVMCI's global handle block chain.
bool JVMCI::is_global_handle(jobject handle) {
  return _object_handles->chain_contains(handle);
}
1207 
// Allocates a JVMCI metadata handle for a Method*. Serialized on JVMCI_lock.
jmetadata JVMCI::allocate_handle(const methodHandle& handle) {
  assert(_metadata_handles != NULL, "uninitialized");
  MutexLocker ml(JVMCI_lock);
  return _metadata_handles->allocate_handle(handle);
}
1213 
// Allocates a JVMCI metadata handle for a ConstantPool*. Serialized on JVMCI_lock.
jmetadata JVMCI::allocate_handle(const constantPoolHandle& handle) {
  assert(_metadata_handles != NULL, "uninitialized");
  MutexLocker ml(JVMCI_lock);
  return _metadata_handles->allocate_handle(handle);
}
1219 
// Returns a metadata handle to the free list. Serialized on JVMCI_lock.
void JVMCI::release_handle(jmetadata handle) {
  MutexLocker ml(JVMCI_lock);
  _metadata_handles->chain_free_list(handle);
}
1224 
// GC support: applies f to every oop held in JVMCI's global handle blocks.
void JVMCI::oops_do(OopClosure* f) {
  if (_object_handles != NULL) {
    _object_handles->oops_do(f);
  }
}
1230 
// Applies f to every Metadata* held in JVMCI's metadata handle blocks.
void JVMCI::metadata_do(void f(Metadata*)) {
  if (_metadata_handles != NULL) {
    _metadata_handles->metadata_do(f);
  }
}
1236 
// Class-unloading support: drops metadata handles whose holders are dead.
// Only does work when unloading actually occurred in this GC cycle.
void JVMCI::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  if (_metadata_handles != NULL && unloading_occurred) {
    _metadata_handles->do_unloading(is_alive);
  }
}
1242 
// Delegates compilation level adjustment to the compiler runtime.
CompLevel JVMCI::adjust_comp_level(methodHandle method, bool is_osr, CompLevel level, JavaThread* thread) {
  return compiler_runtime()->adjust_comp_level(method, is_osr, level, thread);
}
1246 
1247 
// private void CompilerToVM.registerNatives()
// Registers the CompilerToVM native methods with JNI. On bulk registration
// failure, retries one method at a time to pinpoint and report the culprit.
JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))

#ifdef _LP64
#ifndef TARGET_ARCH_sparc
  // Sanity-check that the heap does not end too close to the top of the
  // address space (TLAB allocation code assumes headroom above the heap).
  uintptr_t heap_end = (uintptr_t) Universe::heap()->reserved_region().end();
  uintptr_t allocation_end = heap_end + ((uintptr_t)16) * 1024 * 1024 * 1024;
  guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
#endif // TARGET_ARCH_sparc
#else
  fatal("check TLAB allocation code for address space conflicts");
#endif

  JNI_JVMCIENV(env);

  if (!EnableJVMCI) {
    JVMCI_THROW_MSG(InternalError, "JVMCI is not enabled");
  }

  JVMCIENV->runtime()->initialize(JVMCIENV);

  {
    ResourceMark rm;
    HandleMark hm(thread);
    // RegisterNatives is a JNI call, so leave the VM state first.
    ThreadToNativeFromVM trans(thread);

    // Ensure _non_oop_bits is initialized
    Universe::non_oop_word();

    if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods, CompilerToVM::methods_count())) {
      if (!env->ExceptionCheck()) {
        // Re-register one at a time to identify the failing method.
        for (int i = 0; i < CompilerToVM::methods_count(); i++) {
          if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods + i, 1)) {
            guarantee(false, "Error registering JNI method %s%s", CompilerToVM::methods[i].name, CompilerToVM::methods[i].signature);
            break;
          }
        }
      } else {
        env->ExceptionDescribe();
      }
      guarantee(false, "Failed registering CompilerToVM native methods");
    }
  }
JVM_END
1292 
1293 
// Returns whether the compiler runtime's HotSpotJVMCIRuntime object exists.
bool JVMCI::is_compiler_initialized() {
  return compiler_runtime()->is_HotSpotJVMCIRuntime_initialized();
}
1297 
1298 
// VM-exit hook: shuts down the compiler runtime if one was created.
void JVMCI::shutdown() {
  if (compiler_runtime() != NULL) {
    compiler_runtime()->shutdown();
  }
}
1304 
1305 
1306 bool JVMCI::shutdown_called() {
1307   if (compiler_runtime() != NULL) {
1308     return compiler_runtime()->shutdown_called();
1309   }
1310   return false;
1311 }
1312 
1313 
// Notifies the Java-side HotSpotJVMCIRuntime that the VM is shutting down.
// A no-op when the Java runtime object was never created.
void JVMCIRuntime::shutdown() {
  if (_HotSpotJVMCIRuntime_instance.is_non_null()) {
    _shutdown_called = true;

    THREAD_JVMCIENV(JavaThread::current());
    JVMCIENV->call_HotSpotJVMCIRuntime_shutdown(_HotSpotJVMCIRuntime_instance);
  }
}
1322 
// Notifies the Java-side HotSpotJVMCIRuntime that bootstrapping is complete.
void JVMCIRuntime::bootstrap_finished(TRAPS) {
  if (_HotSpotJVMCIRuntime_instance.is_non_null()) {
    THREAD_JVMCIENV(JavaThread::current());
    JVMCIENV->call_HotSpotJVMCIRuntime_bootstrapFinished(_HotSpotJVMCIRuntime_instance, JVMCIENV);
  }
}
// Adjusts the requested compilation level via the Java side. The per-thread
// adjusting_comp_level flag prevents re-entrant adjustment (the Java upcall
// may itself trigger compilations).
CompLevel JVMCIRuntime::adjust_comp_level(methodHandle method, bool is_osr, CompLevel level, JavaThread* thread) {
  if (!thread->adjusting_comp_level()) {
    thread->set_adjusting_comp_level(true);
    level = adjust_comp_level_inner(method, is_osr, level, thread);
    thread->set_adjusting_comp_level(false);
  }
  return level;
}
1337 
// Calls HotSpotJVMCIRuntime.adjustCompilationLevel to let the Java side pick
// a different compilation level. Returns the original level while JVMCI is
// bootstrapping, not yet initialized, or not participating, and whenever the
// upcall fails or returns an out-of-range value.
CompLevel JVMCIRuntime::adjust_comp_level_inner(methodHandle method, bool is_osr, CompLevel level, JavaThread* thread) {
  JVMCICompiler* compiler = JVMCICompiler::instance(false, thread);
  if (compiler != NULL && compiler->is_bootstrapping()) {
    return level;
  }
  if (!is_HotSpotJVMCIRuntime_initialized() || _comp_level_adjustment == JVMCIRuntime::none) {
    // JVMCI cannot participate in compilation scheduling until
    // JVMCI is initialized and indicates it wants to participate.
    return level;
  }

  JavaThread* THREAD = JavaThread::current();
  ResourceMark rm;
  HandleMark hm;

// CHECK_RETURN deliberately completes the call's argument list (it supplies
// the trailing "JVMCIENV)") and then bails out with the unadjusted level if
// the upcall left a pending exception. Do not restructure the call sites
// without understanding this.
#define CHECK_RETURN JVMCIENV);                                         \
  if (JVMCIENV->has_pending_exception()) {                              \
    if (JVMCIENV->is_hotspot()) {                                       \
      Handle exception(THREAD, PENDING_EXCEPTION);                      \
      CLEAR_PENDING_EXCEPTION;                                          \
                                                                        \
      if (exception->is_a(SystemDictionary::ThreadDeath_klass())) {     \
        /* In the special case of ThreadDeath, we need to reset the */  \
        /* pending async exception so that it is propagated.         */ \
        thread->set_pending_async_exception(exception());               \
        return level;                                                   \
      }                                                                 \
      /* No need report errors while adjusting compilation level. */    \
      /* The most likely error will be a StackOverflowError or */       \
      /* an OutOfMemoryError. */                                        \
    } else {                                                            \
      JVMCIENV->clear_pending_exception();                              \
    }                                                                   \
    return level;                                                       \
  }                                                                     \
  (void)(0

  THREAD_JVMCIENV(thread);
  JVMCIObject receiver = _HotSpotJVMCIRuntime_instance;
  JVMCIObject name;
  JVMCIObject sig;
  // Only materialize name/signature strings when the policy requires them.
  if (_comp_level_adjustment == JVMCIRuntime::by_full_signature) {
    name = JVMCIENV->create_string(method->name(), CHECK_RETURN);
    sig = JVMCIENV->create_string(method->signature(), CHECK_RETURN);
  }

  int comp_level = JVMCIENV->call_HotSpotJVMCIRuntime_adjustCompilationLevel(receiver, method->method_holder(), name, sig, is_osr, level, CHECK_RETURN);
  if (comp_level < CompLevel_none || comp_level > CompLevel_full_optimization) {
    assert(false, "compilation level out of bounds");
    return level;
  }
  return (CompLevel) comp_level;
#undef CHECK_RETURN
}
1392 
// Prints the thread's pending exception (message plus stack trace) to tty.
// With clear == false the exception is re-installed afterwards so callers
// still see it pending. ThreadDeath is silently swallowed.
void JVMCIRuntime::describe_pending_hotspot_exception(JavaThread* THREAD, bool clear) {
  if (HAS_PENDING_EXCEPTION) {
    Handle exception(THREAD, PENDING_EXCEPTION);
    // Remember the origin so the exception can be restored unchanged.
    const char* exception_file = THREAD->exception_file();
    int exception_line = THREAD->exception_line();
    CLEAR_PENDING_EXCEPTION;
    if (exception->is_a(SystemDictionary::ThreadDeath_klass())) {
      // Don't print anything if we are being killed.
    } else {
      java_lang_Throwable::print(exception(), tty);
      tty->cr();
      java_lang_Throwable::print_stack_trace(exception, tty);

      // Clear and ignore any exceptions raised during printing
      CLEAR_PENDING_EXCEPTION;
    }
    if (!clear) {
      THREAD->set_pending_exception(exception(), exception_file, exception_line);
    }
  }
}
1414 
1415 
// Terminates the VM after reporting message and the pending exception. Only
// the first thread to get here prints; later threads just wait until the
// VM actually exits.
void JVMCIRuntime::exit_on_pending_exception(JVMCIEnv* JVMCIENV, const char* message) {
  JavaThread* THREAD = JavaThread::current();

  static volatile int report_error = 0;
  if (!report_error && Atomic::cmpxchg(1, &report_error, 0) == 0) {
    // Only report an error once
    tty->print_raw_cr(message);
    if (JVMCIENV != NULL) {
      JVMCIENV->describe_pending_exception(true);
    } else {
      describe_pending_hotspot_exception(THREAD, true);
    }
  } else {
    // Allow error reporting thread to print the stack trace.  Windows
    // doesn't allow uninterruptible wait for JavaThreads
    const bool interruptible = true;
    os::sleep(THREAD, 200, interruptible);
  }

  before_exit(THREAD);
  vm_exit(-1);
}
1438 
1439 // ------------------------------------------------------------------
1440 // Note: the logic of this method should mirror the logic of
1441 // constantPoolOopDesc::verify_constant_pool_resolve.
1442 bool JVMCIRuntime::check_klass_accessibility(Klass* accessing_klass, Klass* resolved_klass) {
1443   if (accessing_klass->is_objArray_klass()) {
1444     accessing_klass = ObjArrayKlass::cast(accessing_klass)->bottom_klass();
1445   }
1446   if (!accessing_klass->is_instance_klass()) {
1447     return true;
1448   }
1449 
1450   if (resolved_klass->is_objArray_klass()) {
1451     // Find the element klass, if this is an array.
1452     resolved_klass = ObjArrayKlass::cast(resolved_klass)->bottom_klass();
1453   }
1454   if (resolved_klass->is_instance_klass()) {
1455     Reflection::VerifyClassAccessResults result =
1456       Reflection::verify_class_access(accessing_klass, InstanceKlass::cast(resolved_klass), true);
1457     return result == Reflection::ACCESS_OK;
1458   }
1459   return true;
1460 }
1461 
1462 // ------------------------------------------------------------------
1463 Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass,
1464                                           const constantPoolHandle& cpool,
1465                                           Symbol* sym,
1466                                           bool require_local) {
1467   JVMCI_EXCEPTION_CONTEXT;
1468 
1469   // Now we need to check the SystemDictionary
1470   if (sym->char_at(0) == 'L' &&
1471     sym->char_at(sym->utf8_length()-1) == ';') {
1472     // This is a name from a signature.  Strip off the trimmings.
1473     // Call recursive to keep scope of strippedsym.
1474     TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
1475                     sym->utf8_length()-2,
1476                     CHECK_NULL);
1477     return get_klass_by_name_impl(accessing_klass, cpool, strippedsym, require_local);
1478   }
1479 
1480   Handle loader(THREAD, (oop)NULL);
1481   Handle domain(THREAD, (oop)NULL);
1482   if (accessing_klass != NULL) {
1483     loader = Handle(THREAD, accessing_klass->class_loader());
1484     domain = Handle(THREAD, accessing_klass->protection_domain());
1485   }
1486 
1487   Klass* found_klass;
1488   {
1489     ttyUnlocker ttyul;  // release tty lock to avoid ordering problems
1490     MutexLocker ml(Compile_lock);
1491     if (!require_local) {
1492       found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL);
1493     } else {
1494       found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL);
1495     }
1496   }
1497 
1498   // If we fail to find an array klass, look again for its element type.
1499   // The element type may be available either locally or via constraints.
1500   // In either case, if we can find the element type in the system dictionary,
1501   // we must build an array type around it.  The CI requires array klasses
1502   // to be loaded if their element klasses are loaded, except when memory
1503   // is exhausted.
1504   if (sym->char_at(0) == '[' &&
1505       (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) {
1506     // We have an unloaded array.
1507     // Build it on the fly if the element class exists.
1508     TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
1509                                                  sym->utf8_length()-1,
1510                                                  CHECK_NULL);
1511 
1512     // Get element Klass recursively.
1513     Klass* elem_klass =
1514       get_klass_by_name_impl(accessing_klass,
1515                              cpool,
1516                              elem_sym,
1517                              require_local);
1518     if (elem_klass != NULL) {
1519       // Now make an array for it
1520       return elem_klass->array_klass(THREAD);
1521     }
1522   }
1523 
1524   if (found_klass == NULL && !cpool.is_null() && cpool->has_preresolution()) {
1525     // Look inside the constant pool for pre-resolved class entries.
1526     for (int i = cpool->length() - 1; i >= 1; i--) {
1527       if (cpool->tag_at(i).is_klass()) {
1528         Klass*  kls = cpool->resolved_klass_at(i);
1529         if (kls->name() == sym) {
1530           return kls;
1531         }
1532       }
1533     }
1534   }
1535 
1536   return found_klass;
1537 }
1538 
1539 // ------------------------------------------------------------------
1540 Klass* JVMCIRuntime::get_klass_by_name(Klass* accessing_klass,
1541                                   Symbol* klass_name,
1542                                   bool require_local) {
1543   ResourceMark rm;
1544   constantPoolHandle cpool;
1545   return get_klass_by_name_impl(accessing_klass,
1546                                                  cpool,
1547                                                  klass_name,
1548                                                  require_local);
1549 }
1550 
1551 // ------------------------------------------------------------------
1552 // Implementation of get_klass_by_index.
1553 Klass* JVMCIRuntime::get_klass_by_index_impl(const constantPoolHandle& cpool,
1554                                         int index,
1555                                         bool& is_accessible,
1556                                         Klass* accessor) {
1557   JVMCI_EXCEPTION_CONTEXT;
1558   Klass* klass = ConstantPool::klass_at_if_loaded(cpool, index);
1559   Symbol* klass_name = NULL;
1560   if (klass == NULL) {
1561     klass_name = cpool->klass_name_at(index);
1562   }
1563 
1564   if (klass == NULL) {
1565     // Not found in constant pool.  Use the name to do the lookup.
1566     Klass* k = get_klass_by_name_impl(accessor,
1567                                         cpool,
1568                                         klass_name,
1569                                         false);
1570     // Calculate accessibility the hard way.
1571     if (k == NULL) {
1572       is_accessible = false;
1573     } else if (k->class_loader() != accessor->class_loader() &&
1574                get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
1575       // Loaded only remotely.  Not linked yet.
1576       is_accessible = false;
1577     } else {
1578       // Linked locally, and we must also check public/private, etc.
1579       is_accessible = check_klass_accessibility(accessor, k);
1580     }
1581     if (!is_accessible) {
1582       return NULL;
1583     }
1584     return k;
1585   }
1586 
1587   // It is known to be accessible, since it was found in the constant pool.
1588   is_accessible = true;
1589   return klass;
1590 }
1591 
1592 // ------------------------------------------------------------------
1593 // Get a klass from the constant pool.
1594 Klass* JVMCIRuntime::get_klass_by_index(const constantPoolHandle& cpool,
1595                                    int index,
1596                                    bool& is_accessible,
1597                                    Klass* accessor) {
1598   ResourceMark rm;
1599   Klass* result = get_klass_by_index_impl(cpool, index, is_accessible, accessor);
1600   return result;
1601 }
1602 
1603 // ------------------------------------------------------------------
1604 // Implementation of get_field_by_index.
1605 //
1606 // Implementation note: the results of field lookups are cached
1607 // in the accessor klass.
1608 void JVMCIRuntime::get_field_by_index_impl(InstanceKlass* klass, fieldDescriptor& field_desc,
1609                                         int index) {
1610   JVMCI_EXCEPTION_CONTEXT;
1611 
1612   assert(klass->is_linked(), "must be linked before using its constant-pool");
1613 
1614   constantPoolHandle cpool(thread, klass->constants());
1615 
1616   // Get the field's name, signature, and type.
1617   Symbol* name  = cpool->name_ref_at(index);
1618 
1619   int nt_index = cpool->name_and_type_ref_index_at(index);
1620   int sig_index = cpool->signature_ref_index_at(nt_index);
1621   Symbol* signature = cpool->symbol_at(sig_index);
1622 
1623   // Get the field's declared holder.
1624   int holder_index = cpool->klass_ref_index_at(index);
1625   bool holder_is_accessible;
1626   Klass* declared_holder = get_klass_by_index(cpool, holder_index,
1627                                                holder_is_accessible,
1628                                                klass);
1629 
1630   // The declared holder of this field may not have been loaded.
1631   // Bail out with partial field information.
1632   if (!holder_is_accessible) {
1633     return;
1634   }
1635 
1636 
1637   // Perform the field lookup.
1638   Klass*  canonical_holder =
1639     InstanceKlass::cast(declared_holder)->find_field(name, signature, &field_desc);
1640   if (canonical_holder == NULL) {
1641     return;
1642   }
1643 
1644   assert(canonical_holder == field_desc.field_holder(), "just checking");
1645 }
1646 
1647 // ------------------------------------------------------------------
1648 // Get a field by index from a klass's constant pool.
1649 void JVMCIRuntime::get_field_by_index(InstanceKlass* accessor, fieldDescriptor& fd, int index) {
1650   ResourceMark rm;
1651   return get_field_by_index_impl(accessor, fd, index);
1652 }
1653 
1654 // ------------------------------------------------------------------
1655 // Perform an appropriate method lookup based on accessor, holder,
1656 // name, signature, and bytecode.
1657 methodHandle JVMCIRuntime::lookup_method(InstanceKlass* accessor,
1658                                Klass*        holder,
1659                                Symbol*       name,
1660                                Symbol*       sig,
1661                                Bytecodes::Code bc,
1662                                constantTag   tag) {
1663   // Accessibility checks are performed in JVMCIEnv::get_method_by_index_impl().
1664   assert(check_klass_accessibility(accessor, holder), "holder not accessible");
1665 
1666   methodHandle dest_method;
1667   LinkInfo link_info(holder, name, sig, accessor, LinkInfo::needs_access_check, tag);
1668   switch (bc) {
1669   case Bytecodes::_invokestatic:
1670     dest_method =
1671       LinkResolver::resolve_static_call_or_null(link_info);
1672     break;
1673   case Bytecodes::_invokespecial:
1674     dest_method =
1675       LinkResolver::resolve_special_call_or_null(link_info);
1676     break;
1677   case Bytecodes::_invokeinterface:
1678     dest_method =
1679       LinkResolver::linktime_resolve_interface_method_or_null(link_info);
1680     break;
1681   case Bytecodes::_invokevirtual:
1682     dest_method =
1683       LinkResolver::linktime_resolve_virtual_method_or_null(link_info);
1684     break;
1685   default: ShouldNotReachHere();
1686   }
1687 
1688   return dest_method;
1689 }
1690 
1691 
1692 // ------------------------------------------------------------------
1693 methodHandle JVMCIRuntime::get_method_by_index_impl(const constantPoolHandle& cpool,
1694                                           int index, Bytecodes::Code bc,
1695                                           InstanceKlass* accessor) {
1696   if (bc == Bytecodes::_invokedynamic) {
1697     ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
1698     bool is_resolved = !cpce->is_f1_null();
1699     if (is_resolved) {
1700       // Get the invoker Method* from the constant pool.
1701       // (The appendix argument, if any, will be noted in the method's signature.)
1702       Method* adapter = cpce->f1_as_method();
1703       return methodHandle(adapter);
1704     }
1705 
1706     return NULL;
1707   }
1708 
1709   int holder_index = cpool->klass_ref_index_at(index);
1710   bool holder_is_accessible;
1711   Klass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
1712 
1713   // Get the method's name and signature.
1714   Symbol* name_sym = cpool->name_ref_at(index);
1715   Symbol* sig_sym  = cpool->signature_ref_at(index);
1716 
1717   if (cpool->has_preresolution()
1718       || ((holder == SystemDictionary::MethodHandle_klass() || holder == SystemDictionary::VarHandle_klass()) &&
1719           MethodHandles::is_signature_polymorphic_name(holder, name_sym))) {
1720     // Short-circuit lookups for JSR 292-related call sites.
1721     // That is, do not rely only on name-based lookups, because they may fail
1722     // if the names are not resolvable in the boot class loader (7056328).
1723     switch (bc) {
1724     case Bytecodes::_invokevirtual:
1725     case Bytecodes::_invokeinterface:
1726     case Bytecodes::_invokespecial:
1727     case Bytecodes::_invokestatic:
1728       {
1729         Method* m = ConstantPool::method_at_if_loaded(cpool, index);
1730         if (m != NULL) {
1731           return m;
1732         }
1733       }
1734       break;
1735     default:
1736       break;
1737     }
1738   }
1739 
1740   if (holder_is_accessible) { // Our declared holder is loaded.
1741     constantTag tag = cpool->tag_ref_at(index);
1742     methodHandle m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
1743     if (!m.is_null()) {
1744       // We found the method.
1745       return m;
1746     }
1747   }
1748 
1749   // Either the declared holder was not loaded, or the method could
1750   // not be found.
1751 
1752   return NULL;
1753 }
1754 
1755 // ------------------------------------------------------------------
1756 InstanceKlass* JVMCIRuntime::get_instance_klass_for_declared_method_holder(Klass* method_holder) {
1757   // For the case of <array>.clone(), the method holder can be an ArrayKlass*
1758   // instead of an InstanceKlass*.  For that case simply pretend that the
1759   // declared holder is Object.clone since that's where the call will bottom out.
1760   if (method_holder->is_instance_klass()) {
1761     return InstanceKlass::cast(method_holder);
1762   } else if (method_holder->is_array_klass()) {
1763     return InstanceKlass::cast(SystemDictionary::Object_klass());
1764   } else {
1765     ShouldNotReachHere();
1766   }
1767   return NULL;
1768 }
1769 
1770 
1771 // ------------------------------------------------------------------
1772 methodHandle JVMCIRuntime::get_method_by_index(const constantPoolHandle& cpool,
1773                                      int index, Bytecodes::Code bc,
1774                                      InstanceKlass* accessor) {
1775   ResourceMark rm;
1776   return get_method_by_index_impl(cpool, index, bc, accessor);
1777 }
1778 
1779 // ------------------------------------------------------------------
1780 // Check for changes to the system dictionary during compilation
1781 // class loads, evolution, breakpoints
1782 JVMCI::CodeInstallResult JVMCIRuntime::validate_compile_task_dependencies(Dependencies* dependencies, JVMCICompileState* compile_state, char** failure_detail) {
1783   // If JVMTI capabilities were enabled during compile, the compilation is invalidated.
1784   if (compile_state != NULL && compile_state->jvmti_state_changed()) {
1785     *failure_detail = (char*) "Jvmti state change during compilation invalidated dependencies";
1786     return JVMCI::dependencies_failed;
1787   }
1788 
1789   // Dependencies must be checked when the system dictionary changes
1790   // or if we don't know whether it has changed (i.e., compile_state == NULL).
1791   bool counter_changed = compile_state == NULL || compile_state->system_dictionary_modification_counter() != SystemDictionary::number_of_modifications();
1792   CompileTask* task = compile_state == NULL ? NULL : compile_state->task();
1793   Dependencies::DepType result = dependencies->validate_dependencies(task, counter_changed, failure_detail);
1794   if (result == Dependencies::end_marker) {
1795     return JVMCI::ok;
1796   }
1797 
1798   if (!Dependencies::is_klass_type(result) || counter_changed) {
1799     return JVMCI::dependencies_failed;
1800   }
1801   // The dependencies were invalid at the time of installation
1802   // without any intervening modification of the system
1803   // dictionary.  That means they were invalidly constructed.
1804   return JVMCI::dependencies_invalid;
1805 }
1806 
1807 
1808 void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, const methodHandle& method, int entry_bci) {
1809   JVMCI_EXCEPTION_CONTEXT
1810 
1811   JVMCICompileState* compile_state = JVMCIENV->compile_state();
1812 
1813   bool is_osr = entry_bci != InvocationEntryBci;
1814   if (compiler->is_bootstrapping() && is_osr) {
1815     // no OSR compilations during bootstrap - the compiler is just too slow at this point,
1816     // and we know that there are no endless loops
1817     compile_state->set_failure(true, "No OSR during boostrap");
1818     return;
1819   }
1820 
1821   HandleMark hm;
1822   JVMCIObject receiver = get_HotSpotJVMCIRuntime(JVMCIENV);
1823   if (JVMCIENV->has_pending_exception()) {
1824     JVMCIENV->describe_pending_exception(true);
1825     compile_state->set_failure(false, "exception getting HotSpotJVMCIRuntime object");
1826     return;
1827   }
1828   JVMCIObject jvmci_method = JVMCIENV->get_jvmci_method(method, JVMCIENV);
1829   if (JVMCIENV->has_pending_exception()) {
1830     JVMCIENV->describe_pending_exception(true);
1831     compile_state->set_failure(false, "exception getting JVMCI wrapper method");
1832     return;
1833   }
1834 
1835   JVMCIObject result_object = JVMCIENV->call_HotSpotJVMCIRuntime_compileMethod(receiver, jvmci_method, entry_bci,
1836                                                                      (jlong) compile_state, compile_state->task()->compile_id());
1837   if (!JVMCIENV->has_pending_exception()) {
1838     if (result_object.is_non_null()) {
1839       JVMCIObject failure_message = JVMCIENV->get_HotSpotCompilationRequestResult_failureMessage(result_object);
1840       if (failure_message.is_non_null()) {
1841         // Copy failure reason into resource memory first ...
1842         const char* failure_reason = JVMCIENV->as_utf8_string(failure_message);
1843         // ... and then into the C heap.
1844         failure_reason = os::strdup(failure_reason, mtCompiler);
1845         bool retryable = JVMCIENV->get_HotSpotCompilationRequestResult_retry(result_object) != 0;
1846         compile_state->set_failure(retryable, failure_reason, true);
1847       } else {
1848         if (compile_state->task()->code() == NULL) {
1849           compile_state->set_failure(true, "no nmethod produced");
1850         } else {
1851           compile_state->task()->set_num_inlined_bytecodes(JVMCIENV->get_HotSpotCompilationRequestResult_inlinedBytecodes(result_object));
1852           compiler->inc_methods_compiled();
1853         }
1854       }
1855     } else {
1856       assert(false, "JVMCICompiler.compileMethod should always return non-null");
1857     }
1858   } else {
1859     // An uncaught exception was thrown during compilation. Generally these
1860     // should be handled by the Java code in some useful way but if they leak
1861     // through to here report them instead of dying or silently ignoring them.
1862     JVMCIENV->describe_pending_exception(true);
1863     compile_state->set_failure(false, "unexpected exception thrown");
1864   }
1865   if (compiler->is_bootstrapping()) {
1866     compiler->set_bootstrap_compilation_request_handled();
1867   }
1868 }
1869 
1870 
1871 // ------------------------------------------------------------------
1872 JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
1873                                 const methodHandle& method,
1874                                 nmethod*& nm,
1875                                 int entry_bci,
1876                                 CodeOffsets* offsets,
1877                                 int orig_pc_offset,
1878                                 CodeBuffer* code_buffer,
1879                                 int frame_words,
1880                                 OopMapSet* oop_map_set,
1881                                 ExceptionHandlerTable* handler_table,
1882                                 AbstractCompiler* compiler,
1883                                 DebugInformationRecorder* debug_info,
1884                                 Dependencies* dependencies,
1885                                 int compile_id,
1886                                 bool has_unsafe_access,
1887                                 bool has_wide_vector,
1888                                 JVMCIObject compiled_code,
1889                                 JVMCIObject nmethod_mirror,
1890                                 FailedSpeculation** failed_speculations,
1891                                 char* speculations,
1892                                 int speculations_len) {
1893   JVMCI_EXCEPTION_CONTEXT;
1894   nm = NULL;
1895   int comp_level = CompLevel_full_optimization;
1896   char* failure_detail = NULL;
1897 
1898   bool install_default = JVMCIENV->get_HotSpotNmethod_isDefault(nmethod_mirror) != 0;
1899   assert(JVMCIENV->isa_HotSpotNmethod(nmethod_mirror), "must be");
1900   JVMCIObject name = JVMCIENV->get_InstalledCode_name(nmethod_mirror);
1901   const char* nmethod_mirror_name = name.is_null() ? NULL : JVMCIENV->as_utf8_string(name);
1902   int nmethod_mirror_index;
1903   if (!install_default) {
1904     // Reserve or initialize mirror slot in the oops table.
1905     OopRecorder* oop_recorder = debug_info->oop_recorder();
1906     nmethod_mirror_index = oop_recorder->allocate_oop_index(nmethod_mirror.is_hotspot() ? nmethod_mirror.as_jobject() : NULL);
1907   } else {
1908     // A default HotSpotNmethod mirror is never tracked by the nmethod
1909     nmethod_mirror_index = -1;
1910   }
1911 
1912   JVMCI::CodeInstallResult result;
1913   {
1914     // To prevent compile queue updates.
1915     MutexLocker locker(MethodCompileQueue_lock, THREAD);
1916 
1917     // Prevent SystemDictionary::add_to_hierarchy from running
1918     // and invalidating our dependencies until we install this method.
1919     MutexLocker ml(Compile_lock);
1920 
1921     // Encode the dependencies now, so we can check them right away.
1922     dependencies->encode_content_bytes();
1923 
1924     // Record the dependencies for the current compile in the log
1925     if (LogCompilation) {
1926       for (Dependencies::DepStream deps(dependencies); deps.next(); ) {
1927         deps.log_dependency();
1928       }
1929     }
1930 
1931     // Check for {class loads, evolution, breakpoints} during compilation
1932     result = validate_compile_task_dependencies(dependencies, JVMCIENV->compile_state(), &failure_detail);
1933     if (result != JVMCI::ok) {
1934       // While not a true deoptimization, it is a preemptive decompile.
1935       MethodData* mdp = method()->method_data();
1936       if (mdp != NULL) {
1937         mdp->inc_decompile_count();
1938 #ifdef ASSERT
1939         if (mdp->decompile_count() > (uint)PerMethodRecompilationCutoff) {
1940           ResourceMark m;
1941           tty->print_cr("WARN: endless recompilation of %s. Method was set to not compilable.", method()->name_and_sig_as_C_string());
1942         }
1943 #endif
1944       }
1945 
1946       // All buffers in the CodeBuffer are allocated in the CodeCache.
1947       // If the code buffer is created on each compile attempt
1948       // as in C2, then it must be freed.
1949       //code_buffer->free_blob();
1950     } else {
1951       ImplicitExceptionTable implicit_tbl;
1952       nm =  nmethod::new_nmethod(method,
1953                                  compile_id,
1954                                  entry_bci,
1955                                  offsets,
1956                                  orig_pc_offset,
1957                                  debug_info, dependencies, code_buffer,
1958                                  frame_words, oop_map_set,
1959                                  handler_table, &implicit_tbl,
1960                                  compiler, comp_level,
1961                                  speculations, speculations_len,
1962                                  nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
1963 
1964 
1965       // Free codeBlobs
1966       if (nm == NULL) {
1967         // The CodeCache is full.  Print out warning and disable compilation.
1968         {
1969           MutexUnlocker ml(Compile_lock);
1970           MutexUnlocker locker(MethodCompileQueue_lock);
1971           CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
1972         }
1973       } else {
1974         nm->set_has_unsafe_access(has_unsafe_access);
1975         nm->set_has_wide_vectors(has_wide_vector);
1976 
1977         // Record successful registration.
1978         // (Put nm into the task handle *before* publishing to the Java heap.)
1979         if (JVMCIENV->compile_state() != NULL) {
1980           JVMCIENV->compile_state()->task()->set_code(nm);
1981         }
1982 
1983         JVMCINMethodData* data = nm->jvmci_nmethod_data();
1984         assert(data != NULL, "must be");
1985         if (install_default) {
1986           assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm) == NULL, "must be");
1987           if (entry_bci == InvocationEntryBci) {
1988             if (TieredCompilation) {
1989               // If there is an old version we're done with it
1990               CompiledMethod* old = method->code();
1991               if (TraceMethodReplacement && old != NULL) {
1992                 ResourceMark rm;
1993                 char *method_name = method->name_and_sig_as_C_string();
1994                 tty->print_cr("Replacing method %s", method_name);
1995               }
1996               if (old != NULL ) {
1997                 old->make_not_entrant();
1998               }
1999             }
2000             if (TraceNMethodInstalls) {
2001               ResourceMark rm;
2002               char *method_name = method->name_and_sig_as_C_string();
2003               ttyLocker ttyl;
2004               tty->print_cr("Installing method (%d) %s [entry point: %p]",
2005                             comp_level,
2006                             method_name, nm->entry_point());
2007             }
2008             // Allow the code to be executed
2009             method->set_code(method, nm);
2010           } else {
2011             if (TraceNMethodInstalls ) {
2012               ResourceMark rm;
2013               char *method_name = method->name_and_sig_as_C_string();
2014               ttyLocker ttyl;
2015               tty->print_cr("Installing osr method (%d) %s @ %d",
2016                             comp_level,
2017                             method_name,
2018                             entry_bci);
2019             }
2020             InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
2021           }
2022         } else {
2023           assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm) == HotSpotJVMCI::resolve(nmethod_mirror), "must be");
2024         }
2025         nm->make_in_use();
2026       }
2027       result = nm != NULL ? JVMCI::ok :JVMCI::cache_full;
2028     }
2029   }
2030 
2031   // String creation must be done outside lock
2032   if (failure_detail != NULL) {
2033     // A failure to allocate the string is silently ignored.
2034     JVMCIObject message = JVMCIENV->create_string(failure_detail, JVMCIENV);
2035     JVMCIENV->set_HotSpotCompiledNmethod_installationFailureMessage(compiled_code, message);
2036   }
2037 
2038   // JVMTI -- compiled method notification (must be done outside lock)
2039   if (nm != NULL) {
2040     nm->post_compiled_method_load_event();
2041   }
2042 
2043   return result;
2044 }