/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};
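// Note: the name table is generated by the same RUNTIME1_STUBS macro list
// that defines the StubID enum, so _blobs and _blob_names stay index-aligned
// by construction.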

#ifndef PRODUCT
// statistics
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;

static int _byte_arraycopy_stub_cnt = 0;
static int _short_arraycopy_stub_cnt = 0;
static int _int_arraycopy_stub_cnt = 0;
static int _long_arraycopy_stub_cnt = 0;
static int _oop_arraycopy_stub_cnt = 0;

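// Returns the address of the counter matching the given element type so that
// generated arraycopy stubs can increment it directly; same-width types share
// a counter (e.g. T_FLOAT with T_INT, T_DOUBLE with T_LONG).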
address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}


void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
  // generate code for runtime stub
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");

#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case g1_pre_barrier_slow_id:
  case g1_post_barrier_slow_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
#if defined(SPARC) || defined(PPC32)
  case handle_exception_nofpu_id:  // Unused on sparc
#endif
    break;

    // All other stubs should have oopmaps
  default:
    assert(oop_maps != NULL, "must have an oopmap");
  }
#endif

  // align so printing shows nops instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments);
  // install blob
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}


void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
}


CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
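  // FUNCTION_CASE compares the raw entry address against one well-known
  // runtime function and, on a match, returns that function's name as a
  // string literal via the preprocessor's stringize operator.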

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef TRACE_HAVE_INTRINSICS
  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}


JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since it is not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare, but this runtime path is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller();
  }

JRT_END


JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)

  // Note: no handle for klass needed since it is not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare, but this runtime path is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be at least 1");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The method oop of the inlinee (a method possibly
// inlined into the enclosing method) is passed as an argument; to make that possible it is
// embedded in the code as a constant.
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(THREAD, m);

  RegisterMap map(THREAD, false);
  frame fr = THREAD->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(THREAD, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
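    // Example (illustrative): a backward _goto at branch_bci 20 with a
    // 16-bit offset of -12 gives bci = 8, the loop header that becomes the
    // candidate for OSR compilation.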
    bci = branch_bci + offset;
  }
  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}

JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(thread, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(thread, false);
      frame fr = thread->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(thread, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_guards_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // could actually continue in the exception handler ourselves, but
    // there is no easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // The new exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      stringStream tempst;
      tempst.print("compiled method <%s>\n"
                   " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                   nm->method()->print_value_string(), p2i(pc), p2i(thread));
      Exceptions::log_exception(exception, tempst);
    }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(exception);

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only when no other exception happened
    // during the computation of the compiled exception handler.
    // Checking for exception oop equality is not sufficient because
    // some exceptions are pre-allocated and reused.
    if (continuation != NULL && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  thread->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(thread), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from the exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}


JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index, arrayOopDesc* a))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  JavaCallArguments args;
  args.push_int(index);
  args.push_int(a->length());
  THROW_ARG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), vmSymbols::int_int_void_signature(), &args);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_index_exception_count++;)
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(
    thread, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
  } else {
    if (UseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      assert(obj == lock->obj(), "must match");
      ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
    } else {
      lock->set_obj(obj);
      ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
    }
  }
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
  EXCEPTION_MARK;

  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be an object");
  if (UseFastLocking) {
    // When using fast locking, the compiled code has already tried the fast case
    ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
  }
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != NULL, "Sanity check");
  methodHandle method(thread, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(thread, method, true /*create_if_missing*/);
        if (trap_mdo != NULL) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING
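// When DEOPTIMIZE_WHEN_PATCHING is defined for a platform, no instruction
// patching is attempted at all: the alternate patch_code() in the #else
// branch below simply makes the nmethod not-entrant and deoptimizes the
// caller instead.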

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing thread will continue on its way.  Once the class is
// fully_initialized, the initializing thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.
//
//

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(THREAD, vfst.method());
  // Note that caller_method->code() may not be the same as caller_code because of OSRs.
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = NULL; // klass needed by load_klass_patching code
  Klass* load_klass = NULL; // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(THREAD, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time
    // it must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, so force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature.  At this point, we are still going
    // for the deoptimization for consistency with volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(THREAD, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(THREAD, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(thread, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
        cpce->set_method_handle(pool, info);
        appendix = Handle(THREAD, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      case Bytecodes::_invokedynamic: {
        ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
        cpce->set_dynamic_call(pool, info);
        appendix = Handle(THREAD, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
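        // Example (illustrative): with *byte_skip == 0 and *byte_count == 5,
        // copy_buff points five bytes before the patch-info constants, i.e.
        // at the first byte of the saved patch body.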
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != NULL, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical otherwise we'd have never entered this block of code

          const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != NULL, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // Depending on the checks below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass)->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass));
            } else {
              assert(mirror() != NULL, "klass not set");
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

#if defined(SPARC) || defined(PPC32)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          // Update the location in the nmethod with the proper
          // metadata.  When the code was generated, a NULL was stuffed
          // in the metadata table and that table needs to be updated to
          // have the right value.  On Intel the value is kept
          // directly in the instruction instead of in the metadata
          // table, so set_data above effectively updated the value.
          nmethod* nm = CodeCache::find_nmethod(instr_pc);
          assert(nm != NULL, "invalid nmethod_pc");
          RelocIterator mds(nm, copy_buff, copy_buff + 1);
          bool found = false;
          while (mds.next() && !found) {
            if (mds.type() == relocInfo::oop_type) {
              assert(stub_id == Runtime1::load_mirror_patching_id ||
                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
              oop_Relocation* r = mds.oop_reloc();
              oop* oop_adr = r->oop_addr();
              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
              r->fix_oop_relocation();
              found = true;
            } else if (mds.type() == relocInfo::metadata_type) {
              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
              metadata_Relocation* r = mds.metadata_reloc();
              Metadata** metadata_adr = r->metadata_addr();
              *metadata_adr = load_klass;
              r->fix_metadata_relocation();
              found = true;
            }
          }
          assert(found, "the metadata must exist!");
        }
#endif
        if (do_patch) {
          // replace instructions
          // first replace the tail, then the call
#ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
               stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != NULL, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

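          // Copy the patch body over the patch site, skipping the jump-sized
          // head; the head itself is swapped in MT-safely below so a racing
          // thread never sees a half-patched instruction.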
          for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update relocInfo to metadata
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef SPARC
            // SPARC takes two relocations for a metadata reference, so update the second one.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC32
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }

        } else {
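          // Class still being initialized: leave the patch body in the stub
          // and redirect the patch site to the being_init check instead.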
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode) {
    MutexLockerEx ml_code(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

#else // DEOPTIMIZE_WHEN_PATCHING

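// When DEOPTIMIZE_WHEN_PATCHING is defined we never patch the instruction
// stream in place. The calling nmethod is simply made not entrant and its
// frame is deoptimized; execution resumes in the interpreter, which can
// resolve the field, klass, mirror or appendix through the normal path.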
JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id))
  RegisterMap reg_map(thread, false);

  NOT_PRODUCT(_patch_code_slowcase_cnt++;)
  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // It's possible the nmethod was invalidated in the last
  // safepoint, but if it's still alive then make it not_entrant.
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  if (nm != NULL) {
    nm->make_not_entrant();
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
JRT_END

#endif // DEOPTIMIZE_WHEN_PATCHING

//
// Entry point for compiled code. We want to patch an nmethod.
// We do not make a normal VM transition here, because we need to know,
// after the patching is complete and any safepoint(s) have been taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which performs the normal VM transition; when it returns
// we can check for deoptimization. This simplifies the assembly code in
// the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in Java: do not use oops here and do not safepoint.

  // Return nonzero if the calling code was deoptimized.

  return caller_is_deopted();
}

int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_mirror_patching_id);
  }
  // Back in Java: do not use oops here and do not safepoint.

  // Return nonzero if the calling code was deoptimized.

  return caller_is_deopted();
}

int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_appendix_patching_id);
  }
  // Back in Java: do not use oops here and do not safepoint.

  // Return nonzero if the calling code was deoptimized.

  return caller_is_deopted();
}

// access_field_patching is another compiled-code entry point; see the
// comment above Runtime1::move_klass_patching for why there is no normal
// VM transition here.

int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, access_field_patching_id);
  }
  // Back in Java: do not use oops here and do not safepoint.

  // Return nonzero if the calling code was deoptimized.

  return caller_is_deopted();
}


JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return an int instead of a bool; otherwise there may be a
  // mismatch between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, while
  // the JVM reads the whole %eax as the return value, so a false result
  // could be misinterpreted as true.
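  //
  // A minimal sketch of the hazard (illustrative codegen only; no
  // particular compiler is guaranteed to emit exactly this):
  //   bool f() { return false; }  // callee may emit just: mov $0x0, %al
  //   if (f()) { ... }            // a caller testing all of %eax could see
  //                               // stale upper bits and take the branch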

  assert(mirror != NULL, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
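  // k is NULL when mirror denotes a primitive type (e.g. int.class);
  // instanceof must answer false then, just as for a NULL obj.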
  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END

JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;

  assert(!TieredCompilation, "incompatible with tiered compilation");

  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
  nm->make_not_entrant();
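  // Making the nmethod not entrant sends future calls through the
  // interpreter until the method is recompiled.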

  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO.  Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

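  // Count the failed predicate trap in the MDO so the recompilation can
  // take it into account.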
  if (mdo != NULL) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());

JRT_END

#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt:          %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_stub_cnt:        %d", _byte_arraycopy_stub_cnt);
  tty->print_cr(" _short_arraycopy_stub_cnt:       %d", _short_arraycopy_stub_cnt);
  tty->print_cr(" _int_arraycopy_stub_cnt:         %d", _int_arraycopy_stub_cnt);
  tty->print_cr(" _long_arraycopy_stub_cnt:        %d", _long_arraycopy_stub_cnt);
  tty->print_cr(" _oop_arraycopy_stub_cnt:         %d", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %d", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count:            %d", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count:                                  %d", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT