1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_sharedRuntime.cpp.incl"
  27 #include <math.h>
  28 
  29 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
  30 HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
  31                       char*, int, char*, int, char*, int);
  32 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
  33                       char*, int, char*, int, char*, int);
  34 
  35 // Implementation of SharedRuntime
  36 
  37 #ifndef PRODUCT
  38 // For statistics
  39 int SharedRuntime::_ic_miss_ctr = 0;
  40 int SharedRuntime::_wrong_method_ctr = 0;
  41 int SharedRuntime::_resolve_static_ctr = 0;
  42 int SharedRuntime::_resolve_virtual_ctr = 0;
  43 int SharedRuntime::_resolve_opt_virtual_ctr = 0;
  44 int SharedRuntime::_implicit_null_throws = 0;
  45 int SharedRuntime::_implicit_div0_throws = 0;
  46 int SharedRuntime::_throw_null_ctr = 0;
  47 
  48 int SharedRuntime::_nof_normal_calls = 0;
  49 int SharedRuntime::_nof_optimized_calls = 0;
  50 int SharedRuntime::_nof_inlined_calls = 0;
  51 int SharedRuntime::_nof_megamorphic_calls = 0;
  52 int SharedRuntime::_nof_static_calls = 0;
  53 int SharedRuntime::_nof_inlined_static_calls = 0;
  54 int SharedRuntime::_nof_interface_calls = 0;
  55 int SharedRuntime::_nof_optimized_interface_calls = 0;
  56 int SharedRuntime::_nof_inlined_interface_calls = 0;
  57 int SharedRuntime::_nof_megamorphic_interface_calls = 0;
  58 int SharedRuntime::_nof_removable_exceptions = 0;
  59 
  60 int SharedRuntime::_new_instance_ctr=0;
  61 int SharedRuntime::_new_array_ctr=0;
  62 int SharedRuntime::_multi1_ctr=0;
  63 int SharedRuntime::_multi2_ctr=0;
  64 int SharedRuntime::_multi3_ctr=0;
  65 int SharedRuntime::_multi4_ctr=0;
  66 int SharedRuntime::_multi5_ctr=0;
  67 int SharedRuntime::_mon_enter_stub_ctr=0;
  68 int SharedRuntime::_mon_exit_stub_ctr=0;
  69 int SharedRuntime::_mon_enter_ctr=0;
  70 int SharedRuntime::_mon_exit_ctr=0;
  71 int SharedRuntime::_partial_subtype_ctr=0;
  72 int SharedRuntime::_jbyte_array_copy_ctr=0;
  73 int SharedRuntime::_jshort_array_copy_ctr=0;
  74 int SharedRuntime::_jint_array_copy_ctr=0;
  75 int SharedRuntime::_jlong_array_copy_ctr=0;
  76 int SharedRuntime::_oop_array_copy_ctr=0;
  77 int SharedRuntime::_checkcast_array_copy_ctr=0;
  78 int SharedRuntime::_unsafe_array_copy_ctr=0;
  79 int SharedRuntime::_generic_array_copy_ctr=0;
  80 int SharedRuntime::_slow_array_copy_ctr=0;
  81 int SharedRuntime::_find_handler_ctr=0;
  82 int SharedRuntime::_rethrow_ctr=0;
  83 
  84 int     SharedRuntime::_ICmiss_index                    = 0;
  85 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
  86 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
  87 
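     // Record an inline cache miss at the given pc.  Once the table is full,
     // each new miss site simply reuses (and resets) the last slot.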
  88 void SharedRuntime::trace_ic_miss(address at) {
  89   for (int i = 0; i < _ICmiss_index; i++) {
  90     if (_ICmiss_at[i] == at) {
  91       _ICmiss_count[i]++;
  92       return;
  93     }
  94   }
  95   int index = _ICmiss_index++;
  96   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  97   _ICmiss_at[index] = at;
  98   _ICmiss_count[index] = 1;
  99 }
 100 
 101 void SharedRuntime::print_ic_miss_histogram() {
 102   if (ICMissHistogram) {
 103     tty->print_cr ("IC Miss Histogram:");
 104     int tot_misses = 0;
 105     for (int i = 0; i < _ICmiss_index; i++) {
 106       tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
 107       tot_misses += _ICmiss_count[i];
 108     }
 109     tty->print_cr ("Total IC misses: %7d", tot_misses);
 110   }
 111 }
 112 #endif // PRODUCT
 113 
 114 #ifndef SERIALGC
 115 
 116 // G1 write-barrier pre: executed before a pointer store.
 117 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
 118   if (orig == NULL) {
 119     assert(false, "should be optimized out");
 120     return;
 121   }
 122   assert(orig->is_oop(true /* ignore mark word */), "Error");
 123   // store the original value that was in the field reference
 124   thread->satb_mark_queue().enqueue(orig);
 125 JRT_END
 126 
 127 // G1 write-barrier post: executed after a pointer store.
 128 JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
 129   thread->dirty_card_queue().enqueue(card_addr);
 130 JRT_END
 131 
 132 #endif // !SERIALGC
 133 
 134 
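     // 64-bit integer arithmetic helpers.  Note the (y, x) parameter order:
     // x is the first Java operand, e.g. the dividend for ldiv/lrem below.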
 135 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
 136   return x * y;
 137 JRT_END
 138 
 139 
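     // min_jlong / -1 overflows two's complement (and traps on some CPUs);
     // the JLS defines the result as the dividend, hence the special case.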
 140 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
 141   if (x == min_jlong && y == CONST64(-1)) {
 142     return x;
 143   } else {
 144     return x / y;
 145   }
 146 JRT_END
 147 
 148 
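     // Likewise, the JLS defines min_jlong % -1 to be 0.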
 149 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
 150   if (x == min_jlong && y == CONST64(-1)) {
 151     return 0;
 152   } else {
 153     return x % y;
 154   }
 155 JRT_END
 156 
 157 
 158 const juint  float_sign_mask  = 0x7FFFFFFF;
 159 const juint  float_infinity   = 0x7F800000;
 160 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
 161 const julong double_infinity  = CONST64(0x7FF0000000000000);
 162 
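     // Floating-point remainder: Java's % on float/double follows C fmod
     // semantics rather than the IEEE 754 remainder operation.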
 163 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat  x, jfloat  y))
 164 #ifdef _WIN64
 165   // 64-bit Windows on amd64 returns the wrong values for
 166   // infinity operands.
 167   union { jfloat f; juint i; } xbits, ybits;
 168   xbits.f = x;
 169   ybits.f = y;
 170   // x Mod Infinity == x unless x is infinity
 171   if ( ((xbits.i & float_sign_mask) != float_infinity) &&
 172        ((ybits.i & float_sign_mask) == float_infinity) ) {
 173     return x;
 174   }
 175 #endif
 176   return ((jfloat)fmod((double)x,(double)y));
 177 JRT_END
 178 
 179 
 180 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
 181 #ifdef _WIN64
 182   union { jdouble d; julong l; } xbits, ybits;
 183   xbits.d = x;
 184   ybits.d = y;
 185   // x Mod Infinity == x unless x is infinity
 186   if ( ((xbits.l & double_sign_mask) != double_infinity) &&
 187        ((ybits.l & double_sign_mask) == double_infinity) ) {
 188     return x;
 189   }
 190 #endif
 191   return ((jdouble)fmod((double)x,(double)y));
 192 JRT_END
 193 
 194 #ifdef __SOFTFP__
 195 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
 196   return x + y;
 197 JRT_END
 198 
 199 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
 200   return x - y;
 201 JRT_END
 202 
 203 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
 204   return x * y;
 205 JRT_END
 206 
 207 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
 208   return x / y;
 209 JRT_END
 210 
 211 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
 212   return x + y;
 213 JRT_END
 214 
 215 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
 216   return x - y;
 217 JRT_END
 218 
 219 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
 220   return x * y;
 221 JRT_END
 222 
 223 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
 224   return x / y;
 225 JRT_END
 226 
 227 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
 228   return (jfloat)x;
 229 JRT_END
 230 
 231 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
 232   return (jdouble)x;
 233 JRT_END
 234 
 235 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
 236   return (jdouble)x;
 237 JRT_END
 238 
 239 JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
 240   return x>y ? 1 : (x==y ? 0 : -1);  /* x<y or is_nan*/
 241 JRT_END
 242 
 243 JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
 244   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
 245 JRT_END
 246 
 247 JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
 248   return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
 249 JRT_END
 250 
 251 JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
 252   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
 253 JRT_END
 254 
 255 // Functions to return the opposite of the aeabi functions for NaN.
 256 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
 257   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 258 JRT_END
 259 
 260 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
 261   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 262 JRT_END
 263 
 264 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
 265   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 266 JRT_END
 267 
 268 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
 269   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 270 JRT_END
 271 
 272 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
 273   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 274 JRT_END
 275 
 276 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
 277   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 278 JRT_END
 279 
 280 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
 281   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 282 JRT_END
 283 
 284 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
 285   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 286 JRT_END
 287 
 288 // Intrinsics make gcc generate code for these.
 289 float  SharedRuntime::fneg(float f)   {
 290   return -f;
 291 }
 292 
 293 double SharedRuntime::dneg(double f)  {
 294   return -f;
 295 }
 296 
 297 #endif // __SOFTFP__
 298 
 299 #if defined(__SOFTFP__) || defined(E500V2)
 300 // Intrinsics make gcc generate code for these.
 301 double SharedRuntime::dabs(double f)  {
 302   return (f <= (double)0.0) ? (double)0.0 - f : f;
 303 }
 304 
 305 double SharedRuntime::dsqrt(double f) {
 306   return sqrt(f);
 307 }
 308 #endif
 309 
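     // Java-defined narrowing conversions (JLS 5.1.3): NaN converts to 0 and
     // out-of-range values saturate to the target type's min/max value.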
 310 JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
 311   if (g_isnan(x))
 312     return 0;
 313   if (x >= (jfloat) max_jint)
 314     return max_jint;
 315   if (x <= (jfloat) min_jint)
 316     return min_jint;
 317   return (jint) x;
 318 JRT_END
 319 
 320 
 321 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
 322   if (g_isnan(x))
 323     return 0;
 324   if (x >= (jfloat) max_jlong)
 325     return max_jlong;
 326   if (x <= (jfloat) min_jlong)
 327     return min_jlong;
 328   return (jlong) x;
 329 JRT_END
 330 
 331 
 332 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
 333   if (g_isnan(x))
 334     return 0;
 335   if (x >= (jdouble) max_jint)
 336     return max_jint;
 337   if (x <= (jdouble) min_jint)
 338     return min_jint;
 339   return (jint) x;
 340 JRT_END
 341 
 342 
 343 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
 344   if (g_isnan(x))
 345     return 0;
 346   if (x >= (jdouble) max_jlong)
 347     return max_jlong;
 348   if (x <= (jdouble) min_jlong)
 349     return min_jlong;
 350   return (jlong) x;
 351 JRT_END
 352 
 353 
 354 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
 355   return (jfloat)x;
 356 JRT_END
 357 
 358 
 359 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
 360   return (jfloat)x;
 361 JRT_END
 362 
 363 
 364 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
 365   return (jdouble)x;
 366 JRT_END
 367 
 368 // Exception handling across interpreter/compiler boundaries
 369 //
 370 // exception_handler_for_return_address(...) returns the continuation address.
 371 // The continuation address is the entry point of the exception handler of the
 372 // previous frame, selected based on the return address.
 373 
 374 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
 375   assert(frame::verify_return_pc(return_address), "must be a return pc");
 376 
 377   // Reset MethodHandle flag.
 378   thread->set_is_method_handle_return(false);
 379 
 380   // the fastest case first
 381   CodeBlob* blob = CodeCache::find_blob(return_address);
 382   if (blob != NULL && blob->is_nmethod()) {
 383     nmethod* code = (nmethod*)blob;
 384     assert(code != NULL, "nmethod must be present");
 385     // Check if the return address is a MethodHandle call site.
 386     thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
 387     // native nmethods don't have exception handlers
 388     assert(!code->is_native_method(), "no exception handler");
 389     assert(code->header_begin() != code->exception_begin(), "no exception handler");
 390     if (code->is_deopt_pc(return_address)) {
 391       return SharedRuntime::deopt_blob()->unpack_with_exception();
 392     } else {
 393       return code->exception_begin();
 394     }
 395   }
 396 
 397   // Entry code
 398   if (StubRoutines::returns_to_call_stub(return_address)) {
 399     return StubRoutines::catch_exception_entry();
 400   }
 401   // Interpreted code
 402   if (Interpreter::contains(return_address)) {
 403     return Interpreter::rethrow_exception_entry();
 404   }
 405 
 406   // Compiled code
 407   if (CodeCache::contains(return_address)) {
 408     CodeBlob* blob = CodeCache::find_blob(return_address);
 409     if (blob->is_nmethod()) {
 410       nmethod* code = (nmethod*)blob;
 411       assert(code != NULL, "nmethod must be present");
 412       // Check if the return address is a MethodHandle call site.
 413       thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
 414       assert(code->header_begin() != code->exception_begin(), "no exception handler");
 415       return code->exception_begin();
 416     }
 417     if (blob->is_runtime_stub()) {
 418       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
 419     }
 420   }
 421   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
 422 #ifndef PRODUCT
 423   { ResourceMark rm;
 424     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
 425     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
 426     tty->print_cr("b) other problem");
 427   }
 428 #endif // PRODUCT
 429   ShouldNotReachHere();
 430   return NULL;
 431 }
 432 
 433 
 434 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
 435   return raw_exception_handler_for_return_address(thread, return_address);
 436 JRT_END
 437 
 438 
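     // Return the stub to continue at after a safepoint poll fault at 'pc':
     // the polling page return handler for polls at returns, otherwise the
     // polling page safepoint handler.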
 439 address SharedRuntime::get_poll_stub(address pc) {
 440   address stub;
 441   // Look up the code blob
 442   CodeBlob *cb = CodeCache::find_blob(pc);
 443 
 444   // Should be an nmethod
 445   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
 446 
 447   // Look up the relocation information
 448   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
 449     "safepoint polling: type must be poll" );
 450 
 451   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
 452     "Only polling locations are used for safepoint");
 453 
 454   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
 455   if (at_poll_return) {
 456     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
 457            "polling page return stub not created yet");
 458     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 459   } else {
 460     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
 461            "polling page safepoint stub not created yet");
 462     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
 463   }
 464 #ifndef PRODUCT
 465   if( TraceSafepoint ) {
 466     char buf[256];
 467     jio_snprintf(buf, sizeof(buf),
 468                  "... found polling page %s exception at pc = "
 469                  INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
 470                  at_poll_return ? "return" : "loop",
 471                  (intptr_t)pc, (intptr_t)stub);
 472     tty->print_raw_cr(buf);
 473   }
 474 #endif // PRODUCT
 475   return stub;
 476 }
 477 
 478 
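     // Fetch the receiver of the call described by 'sig' from the interpreted
     // caller's expression stack.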
 479 oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
 480   assert(caller.is_interpreted_frame(), "");
 481   int args_size = ArgumentSizeComputer(sig).size() + 1;
 482   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
 483   oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
 484   assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
 485   return result;
 486 }
 487 
 488 
 489 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
 490   if (JvmtiExport::can_post_on_exceptions()) {
 491     vframeStream vfst(thread, true);
 492     methodHandle method = methodHandle(thread, vfst.method());
 493     address bcp = method()->bcp_from(vfst.bci());
 494     JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
 495   }
 496   Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
 497 }
 498 
 499 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
 500   Handle h_exception = Exceptions::new_exception(thread, name, message);
 501   throw_and_post_jvmti_exception(thread, h_exception);
 502 }
 503 
 504 // The interpreter code to call this tracing function is only
 505 // called/generated when TraceRedefineClasses has the right bits
 506 // set. Since obsolete methods are never compiled, we don't have
 507 // to modify the compilers to generate calls to this function.
 508 //
 509 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
 510     JavaThread* thread, methodOopDesc* method))
 511   assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");
 512 
 513   if (method->is_obsolete()) {
 514     // We are calling an obsolete method, but this is not necessarily
 515     // an error. Our method could have been redefined just after we
 516     // fetched the methodOop from the constant pool.
 517 
 518     // RC_TRACE macro has an embedded ResourceMark
 519     RC_TRACE_WITH_THREAD(0x00001000, thread,
 520                          ("calling obsolete method '%s'",
 521                           method->name_and_sig_as_C_string()));
 522     if (RC_TRACE_ENABLED(0x00002000)) {
 523       // this option is provided to debug calls to obsolete methods
 524       guarantee(false, "faulting at call to an obsolete method.");
 525     }
 526   }
 527   return 0;
 528 JRT_END
 529 
 530 // ret_pc points into caller; we are returning caller's exception handler
 531 // for the given exception
 532 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
 533                                                     bool force_unwind, bool top_frame_only) {
 534   assert(nm != NULL, "must exist");
 535   ResourceMark rm;
 536 
 537   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
 538   // determine handler bci, if any
 539   EXCEPTION_MARK;
 540 
 541   int handler_bci = -1;
 542   int scope_depth = 0;
 543   if (!force_unwind) {
 544     int bci = sd->bci();
 545     do {
 546       bool skip_scope_increment = false;
 547       // exception handler lookup
 548       KlassHandle ek (THREAD, exception->klass());
 549       handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
 550       if (HAS_PENDING_EXCEPTION) {
 551         // We threw an exception while trying to find the exception handler.
 552         // Transfer the new exception to the exception handle which will
 553         // be set into thread local storage, and do another lookup for an
 554         // exception handler for this exception, this time starting at the
 555         // BCI of the exception handler which caused the exception to be
 556         // thrown (bugs 4307310 and 4546590). Set "exception" reference
 557         // argument to ensure that the correct exception is thrown (4870175).
 558         exception = Handle(THREAD, PENDING_EXCEPTION);
 559         CLEAR_PENDING_EXCEPTION;
 560         if (handler_bci >= 0) {
 561           bci = handler_bci;
 562           handler_bci = -1;
 563           skip_scope_increment = true;
 564         }
 565       }
 566       if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
 567         sd = sd->sender();
 568         if (sd != NULL) {
 569           bci = sd->bci();
 570         }
 571         ++scope_depth;
 572       }
 573     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
 574   }
 575 
 576   // found handling method => lookup exception handler
 577   int catch_pco = ret_pc - nm->code_begin();
 578 
 579   ExceptionHandlerTable table(nm);
 580   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 581   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 582     // Allow abbreviated catch tables.  The idea is to allow a method
 583     // to materialize its exceptions without committing to the exact
 584     // routing of exceptions.  In particular this is needed for adding
 585     // a synthetic handler to unlock monitors when inlining
 586     // synchronized methods since the unlock path isn't represented in
 587     // the bytecodes.
 588     t = table.entry_for(catch_pco, -1, 0);
 589   }
 590 
 591 #ifdef COMPILER1
 592   if (t == NULL && nm->is_compiled_by_c1()) {
 593     assert(nm->unwind_handler_begin() != NULL, "");
 594     return nm->unwind_handler_begin();
 595   }
 596 #endif
 597 
 598   if (t == NULL) {
 599     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
 600     tty->print_cr("   Exception:");
 601     exception->print();
 602     tty->cr();
 603     tty->print_cr(" Compiled exception table :");
 604     table.print();
 605     nm->print_code();
 606     guarantee(false, "missing exception handler");
 607     return NULL;
 608   }
 609 
 610   return nm->code_begin() + t->pco();
 611 }
 612 
 613 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
 614   // These errors occur only at call sites
 615   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
 616 JRT_END
 617 
 618 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
 619   // These errors occur only at call sites
 620   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
 621 JRT_END
 622 
 623 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
 624   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 625 JRT_END
 626 
 627 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
 628   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
 629 JRT_END
 630 
 631 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
 632   // This entry point is effectively only used for NullPointerExceptions which occur at inline
 633   // cache sites (when the callee activation is not yet set up) so we are at a call site
 634   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
 635 JRT_END
 636 
 637 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
 638   // We avoid using the normal exception construction in this case because
 639   // it performs an upcall to Java, and we're already out of stack space.
 640   klassOop k = SystemDictionary::StackOverflowError_klass();
 641   oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
 642   Handle exception (thread, exception_oop);
 643   if (StackTraceInThrowable) {
 644     java_lang_Throwable::fill_in_stack_trace(exception);
 645   }
 646   throw_and_post_jvmti_exception(thread, exception);
 647 JRT_END
 648 
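     // Compute the continuation pc for an implicit exception (null check,
     // division by zero, stack overflow) raised at 'pc'.  Returns NULL when the
     // fault is not recognized so the signal handler can report the error.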
 649 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
 650                                                            address pc,
 651                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
 652 {
 653   address target_pc = NULL;
 654 
 655   if (Interpreter::contains(pc)) {
 656 #ifdef CC_INTERP
 657     // C++ interpreter doesn't throw implicit exceptions
 658     ShouldNotReachHere();
 659 #else
 660     switch (exception_kind) {
 661       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
 662       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
 663       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
 664       default:                      ShouldNotReachHere();
 665     }
 666 #endif // !CC_INTERP
 667   } else {
 668     switch (exception_kind) {
 669       case STACK_OVERFLOW: {
 670         // Stack overflow only occurs upon frame setup; the callee is
 671         // going to be unwound. Dispatch to a shared runtime stub
 672         // which will cause the StackOverflowError to be fabricated
 673         // and processed.
 674         // For stack overflow in deoptimization blob, cleanup thread.
 675         if (thread->deopt_mark() != NULL) {
 676           Deoptimization::cleanup_deopt_info(thread, NULL);
 677         }
 678         return StubRoutines::throw_StackOverflowError_entry();
 679       }
 680 
 681       case IMPLICIT_NULL: {
 682         if (VtableStubs::contains(pc)) {
 683           // We haven't yet entered the callee frame. Fabricate an
 684           // exception and begin dispatching it in the caller. Since
 685           // the caller was at a call site, it's safe to destroy all
 686           // caller-saved registers, as these entry points do.
 687           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
 688 
 689           // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
 690           if (vt_stub == NULL) return NULL;
 691 
 692           if (vt_stub->is_abstract_method_error(pc)) {
 693             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
 694             return StubRoutines::throw_AbstractMethodError_entry();
 695           } else {
 696             return StubRoutines::throw_NullPointerException_at_call_entry();
 697           }
 698         } else {
 699           CodeBlob* cb = CodeCache::find_blob(pc);
 700 
 701           // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
 702           if (cb == NULL) return NULL;
 703 
 704           // Exception happened in CodeCache. Must be either:
 705           // 1. Inline-cache check in C2I handler blob,
 706           // 2. Inline-cache check in nmethod, or
 707           // 3. Implicit null exception in nmethod
 708 
 709           if (!cb->is_nmethod()) {
 710             guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
 711                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
 712             // There is no handler here, so we will simply unwind.
 713             return StubRoutines::throw_NullPointerException_at_call_entry();
 714           }
 715 
 716           // Otherwise, it's an nmethod.  Consult its exception handlers.
 717           nmethod* nm = (nmethod*)cb;
 718           if (nm->inlinecache_check_contains(pc)) {
 719             // exception happened inside inline-cache check code
 720             // => the nmethod is not yet active (i.e., the frame
 721             // is not set up yet) => use return address pushed by
 722             // caller => don't push another return address
 723             return StubRoutines::throw_NullPointerException_at_call_entry();
 724           }
 725 
 726 #ifndef PRODUCT
 727           _implicit_null_throws++;
 728 #endif
 729           target_pc = nm->continuation_for_implicit_exception(pc);
 730           // If there's an unexpected fault, target_pc might be NULL,
 731           // in which case we want to fall through into the normal
 732           // error handling code.
 733         }
 734 
 735         break; // fall through
 736       }
 737 
 738 
 739       case IMPLICIT_DIVIDE_BY_ZERO: {
 740         nmethod* nm = CodeCache::find_nmethod(pc);
 741         guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
 742 #ifndef PRODUCT
 743         _implicit_div0_throws++;
 744 #endif
 745         target_pc = nm->continuation_for_implicit_exception(pc);
 746         // If there's an unexpected fault, target_pc might be NULL,
 747         // in which case we want to fall through into the normal
 748         // error handling code.
 749         break; // fall through
 750       }
 751 
 752       default: ShouldNotReachHere();
 753     }
 754 
 755     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
 756 
 757     // for AbortVMOnException flag
 758     NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
 759     if (exception_kind == IMPLICIT_NULL) {
 760       Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
 761     } else {
 762       Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
 763     }
 764     return target_pc;
 765   }
 766 
 767   ShouldNotReachHere();
 768   return NULL;
 769 }
 770 
 771 
 772 JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
 773 {
 774   THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
 775 }
 776 JNI_END
 777 
 778 
 779 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
 780   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
 781 }
 782 
 783 
 784 #ifndef PRODUCT
 785 JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
 786   const frame f = thread->last_frame();
 787   assert(f.is_interpreted_frame(), "must be an interpreted frame");
 788 #ifndef PRODUCT
 789   methodHandle mh(THREAD, f.interpreter_frame_method());
 790   BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
 791 #endif // !PRODUCT
 792   return preserve_this_value;
 793 JRT_END
 794 #endif // !PRODUCT
 795 
 796 
 797 JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
 798   os::yield_all(attempts);
 799 JRT_END
 800 
 801 
 802 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
 803   assert(obj->is_oop(), "must be a valid oop");
 804   assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
 805   instanceKlass::register_finalizer(instanceOop(obj), CHECK);
 806 JRT_END
 807 
 808 
 809 jlong SharedRuntime::get_java_tid(Thread* thread) {
 810   if (thread != NULL) {
 811     if (thread->is_Java_thread()) {
 812       oop obj = ((JavaThread*)thread)->threadObj();
 813       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
 814     }
 815   }
 816   return 0;
 817 }
 818 
 819 /**
 820  * This function ought to be a void function, but cannot be because
 821  * it gets turned into a tail-call on sparc, which runs into dtrace bug
 822  * 6254741.  Once that is fixed we can remove the dummy return value.
 823  */
 824 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
 825   return dtrace_object_alloc_base(Thread::current(), o);
 826 }
 827 
 828 int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
 829   assert(DTraceAllocProbes, "wrong call");
 830   Klass* klass = o->blueprint();
 831   int size = o->size();
 832   symbolOop name = klass->name();
 833   HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
 834                    name->bytes(), name->utf8_length(), size * HeapWordSize);
 835   return 0;
 836 }
 837 
 838 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
 839     JavaThread* thread, methodOopDesc* method))
 840   assert(DTraceMethodProbes, "wrong call");
 841   symbolOop kname = method->klass_name();
 842   symbolOop name = method->name();
 843   symbolOop sig = method->signature();
 844   HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
 845       kname->bytes(), kname->utf8_length(),
 846       name->bytes(), name->utf8_length(),
 847       sig->bytes(), sig->utf8_length());
 848   return 0;
 849 JRT_END
 850 
 851 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
 852     JavaThread* thread, methodOopDesc* method))
 853   assert(DTraceMethodProbes, "wrong call");
 854   symbolOop kname = method->klass_name();
 855   symbolOop name = method->name();
 856   symbolOop sig = method->signature();
 857   HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
 858       kname->bytes(), kname->utf8_length(),
 859       name->bytes(), name->utf8_length(),
 860       sig->bytes(), sig->utf8_length());
 861   return 0;
 862 JRT_END
 863 
 864 
 865 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
 866 // for a call currently in progress, i.e., arguments have been pushed on the stack
 867 // but the callee has not been invoked yet.  Used by: resolve virtual/static,
 868 // vtable updates, etc.  Caller frame must be compiled.
 869 Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
 870   ResourceMark rm(THREAD);
 871 
 872   // last java frame on stack (which includes native call frames)
 873   vframeStream vfst(thread, true);  // Do not skip any javaCalls
 874 
 875   return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
 876 }
 877 
 878 
 879 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
 880 // for a call currently in progress, i.e., arguments have been pushed on the stack
 881 // but the callee has not been invoked yet.  Caller frame must be compiled.
 882 Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
 883                                               vframeStream& vfst,
 884                                               Bytecodes::Code& bc,
 885                                               CallInfo& callinfo, TRAPS) {
 886   Handle receiver;
 887   Handle nullHandle;  // create a handy null handle for exception returns
 888 
 889   assert(!vfst.at_end(), "Java frame must exist");
 890 
 891   // Find caller and bci from vframe
 892   methodHandle caller (THREAD, vfst.method());
 893   int          bci    = vfst.bci();
 894 
 895   // Find bytecode
 896   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
 897   bc = bytecode->java_code();
 898   int bytecode_index = bytecode->index();
 899 
 900   // Find receiver for non-static call
 901   if (bc != Bytecodes::_invokestatic) {
 902     // This register map must be updated since we need to find the receiver for
 903     // compiled frames. The receiver might be in a register.
 904     RegisterMap reg_map2(thread);
 905     frame stubFrame   = thread->last_frame();
 906     // Caller-frame is a compiled frame
 907     frame callerFrame = stubFrame.sender(&reg_map2);
 908 
 909     methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
 910     if (callee.is_null()) {
 911       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
 912     }
 913     // Retrieve from a compiled argument list
 914     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
 915 
 916     if (receiver.is_null()) {
 917       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
 918     }
 919   }
 920 
 921   // Resolve method. This is parameterized by bytecode.
 922   constantPoolHandle constants (THREAD, caller->constants());
 923   assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
 924   LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
 925 
 926 #ifdef ASSERT
 927   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
 928   if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
 929     assert(receiver.not_null(), "should have thrown exception");
 930     KlassHandle receiver_klass (THREAD, receiver->klass());
 931     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
 932                             // klass is already loaded
 933     KlassHandle static_receiver_klass (THREAD, rk);
 934     assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
 935     if (receiver_klass->oop_is_instance()) {
 936       if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
 937         tty->print_cr("ERROR: Klass not yet initialized!!");
 938         receiver_klass.print();
 939       }
 940       assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
 941     }
 942   }
 943 #endif
 944 
 945   return receiver;
 946 }
 947 
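     // Find the method being called at the current call site: from the
     // entry-frame call wrapper if no Java frames exist since the last
     // JavaCall, otherwise from the topmost Java frame's call site.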
 948 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
 949   ResourceMark rm(THREAD);
 950   // We first need to check if any Java activations (compiled, interpreted)
 951   // exist on the stack since the last JavaCall.  If not, we need
 952   // to get the target method from the JavaCall wrapper.
 953   vframeStream vfst(thread, true);  // Do not skip any javaCalls
 954   methodHandle callee_method;
 955   if (vfst.at_end()) {
 956     // No Java frames were found on stack since we did the JavaCall.
 957     // Hence the stack can only contain an entry_frame.  We need to
 958     // find the target method from the stub frame.
 959     RegisterMap reg_map(thread, false);
 960     frame fr = thread->last_frame();
 961     assert(fr.is_runtime_frame(), "must be a runtimeStub");
 962     fr = fr.sender(&reg_map);
 963     assert(fr.is_entry_frame(), "must be");
 964     // fr is now pointing to the entry frame.
 965     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
 966     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
 967   } else {
 968     Bytecodes::Code bc;
 969     CallInfo callinfo;
 970     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
 971     callee_method = callinfo.selected_method();
 972   }
 973   assert(callee_method()->is_method(), "must be");
 974   return callee_method;
 975 }
 976 
 977 // Resolves a call.
 978 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
 979                                            bool is_virtual,
 980                                            bool is_optimized, TRAPS) {
 981   methodHandle callee_method;
 982   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
 983   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
 984     int retry_count = 0;
 985     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
 986            callee_method->method_holder() != SystemDictionary::Object_klass()) {
 987       // If there is a pending exception then there is no need to retry
 988       // resolving this method.
 989       // If the method has been redefined, we need to try again.
 990       // Hack: we have no way to update the vtables of arrays, so don't
 991       // require that java.lang.Object has been updated.
 992 
 993       // It is very unlikely that a method is redefined more than 100 times
 994       // in the middle of resolution. If we loop here more than 100 times,
 995       // there is probably a bug.
 996       guarantee((retry_count++ < 100),
 997                 "Could not resolve to latest version of redefined method");
 998       // the method was redefined in the middle of resolution, so retry.
 999       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
1000     }
1001   }
1002   return callee_method;
1003 }
1004 
1005 // Resolves a call.  The compilers generate code for calls that go here
1006 // and are patched with the real destination of the call.
1007 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
1008                                            bool is_virtual,
1009                                            bool is_optimized, TRAPS) {
1010 
1011   ResourceMark rm(thread);
1012   RegisterMap cbl_map(thread, false);
1013   frame caller_frame = thread->last_frame().sender(&cbl_map);
1014 
1015   CodeBlob* caller_cb = caller_frame.cb();
1016   guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
1017   nmethod* caller_nm = caller_cb->as_nmethod_or_null();
1018   // make sure caller is not getting deoptimized
1019   // and removed before we are done with it.
1020   // CLEANUP - with lazy deopt shouldn't need this lock
1021   nmethodLocker caller_lock(caller_nm);
1022 
1023 
1024   // determine call info & receiver
1025   // note: a) receiver is NULL for static calls
1026   //       b) an exception is thrown if receiver is NULL for non-static calls
1027   CallInfo call_info;
1028   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1029   Handle receiver = find_callee_info(thread, invoke_code,
1030                                      call_info, CHECK_(methodHandle()));
1031   methodHandle callee_method = call_info.selected_method();
1032 
1033   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
1034          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
1035 
1036 #ifndef PRODUCT
1037   // tracing/debugging/statistics
1038   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1039                 (is_virtual) ? (&_resolve_virtual_ctr) :
1040                                (&_resolve_static_ctr);
1041   Atomic::inc(addr);
1042 
1043   if (TraceCallFixup) {
1044     ResourceMark rm(thread);
1045     tty->print("resolving %s%s (%s) call to",
1046       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1047       Bytecodes::name(invoke_code));
1048     callee_method->print_short_name(tty);
1049     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1050   }
1051 #endif
1052 
1053   // JSR 292
1054   // If the resolved method is a MethodHandle invoke target the call
1055   // site must be a MethodHandle call site.
1056   if (callee_method->is_method_handle_invoke()) {
1057     assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1058   }
1059 
1060   // Compute entry points. This might require generation of C2I converter
1061   // frames, so we cannot be holding any locks here. Furthermore, the
1062   // computation of the entry points is independent of patching the call.  We
1063   // always return the entry-point, but we only patch the stub if the call has
1064 // not been deoptimized.  Return values: For a virtual call this is a
1065   // (cached_oop, destination address) pair. For a static call/optimized
1066   // virtual this is just a destination address.
1067 
1068   StaticCallInfo static_call_info;
1069   CompiledICInfo virtual_call_info;
1070 
1071   // Make sure the callee nmethod does not get deoptimized and removed before
1072   // we are done patching the code.
1073   nmethod* callee_nm = callee_method->code();
1074   nmethodLocker nl_callee(callee_nm);
1075 #ifdef ASSERT
1076   address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
1077 #endif
1078 
1079   if (is_virtual) {
1080     assert(receiver.not_null(), "sanity check");
1081     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1082     KlassHandle h_klass(THREAD, receiver->klass());
1083     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
1084                      is_optimized, static_bound, virtual_call_info,
1085                      CHECK_(methodHandle()));
1086   } else {
1087     // static call
1088     CompiledStaticCall::compute_entry(callee_method, static_call_info);
1089   }
1090 
1091   // grab lock, check for deoptimization and potentially patch caller
1092   {
1093     MutexLocker ml_patch(CompiledIC_lock);
1094 
1095     // Now that we are ready to patch: if the methodOop was redefined, then
1096     // don't update the call site; let the caller retry.
1097 
1098     if (!callee_method->is_old()) {
1099 #ifdef ASSERT
1100       // We must not try to patch to jump to an already unloaded method.
1101       if (dest_entry_point != 0) {
1102         assert(CodeCache::find_blob(dest_entry_point) != NULL,
1103                "should not unload nmethod while locked");
1104       }
1105 #endif
1106       if (is_virtual) {
1107         CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
1108         if (inline_cache->is_clean()) {
1109           inline_cache->set_to_monomorphic(virtual_call_info);
1110         }
1111       } else {
1112         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
1113         if (ssc->is_clean()) ssc->set(static_call_info);
1114       }
1115     }
1116 
1117   } // unlock CompiledIC_lock
1118 
1119   return callee_method;
1120 }
1121 
1122 
1123 // Inline caches exist only in compiled code
1124 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1125 #ifdef ASSERT
1126   RegisterMap reg_map(thread, false);
1127   frame stub_frame = thread->last_frame();
1128   assert(stub_frame.is_runtime_frame(), "sanity check");
1129   frame caller_frame = stub_frame.sender(&reg_map);
1130   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1131 #endif /* ASSERT */
1132 
1133   methodHandle callee_method;
1134   JRT_BLOCK
1135     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1136     // Return methodOop through TLS
1137     thread->set_vm_result(callee_method());
1138   JRT_BLOCK_END
1139   // return compiled code entry point after potential safepoints
1140   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1141   return callee_method->verified_code_entry();
1142 JRT_END
1143 
1144 
1145 // Handle call site that has been made non-entrant
1146 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
1147   // 6243940 We might end up in here if the callee is deoptimized
1148   // as we race to call it.  We don't want to take a safepoint if
1149   // the caller was interpreted, because the caller frame will look
1150   // interpreted to the stack walkers while the arguments are now
1151   // "compiled", so it is much better to make this transition
1152   // invisible to the stack walking code. The i2c path will
1153   // place the callee method in the callee_target. It is stashed
1154   // there because if we tried to find the callee by normal means, a
1155   // safepoint would be possible and we could have trouble GC'ing the compiled args.
1156   RegisterMap reg_map(thread, false);
1157   frame stub_frame = thread->last_frame();
1158   assert(stub_frame.is_runtime_frame(), "sanity check");
1159   frame caller_frame = stub_frame.sender(&reg_map);
1160 
1161   // MethodHandle invokes don't have a CompiledIC and should always
1162   // simply redispatch to the callee_target.
1163   address   sender_pc = caller_frame.pc();
1164   CodeBlob* sender_cb = caller_frame.cb();
1165   nmethod*  sender_nm = sender_cb->as_nmethod_or_null();
1166   bool is_mh_invoke_via_adapter = false;  // Direct c2c call or via adapter?
1167   if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
1168     // If the callee_target is set, then we have come here via an i2c
1169     // adapter.
1170     methodOop callee = thread->callee_target();
1171     if (callee != NULL) {
1172       assert(callee->is_method(), "sanity");
1173       is_mh_invoke_via_adapter = true;
1174     }
1175   }
1176 
1177   if (caller_frame.is_interpreted_frame() ||
1178       caller_frame.is_entry_frame()       ||
1179       is_mh_invoke_via_adapter) {
1180     methodOop callee = thread->callee_target();
1181     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1182     thread->set_vm_result(callee);
1183     thread->set_callee_target(NULL);
1184     return callee->get_c2i_entry();
1185   }
1186 
1187   // Must be compiled to compiled path which is safe to stackwalk
1188   methodHandle callee_method;
1189   JRT_BLOCK
1190     // Force resolving of caller (if we called from compiled frame)
1191     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
1192     thread->set_vm_result(callee_method());
1193   JRT_BLOCK_END
1194   // return compiled code entry point after potential safepoints
1195   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1196   return callee_method->verified_code_entry();
1197 JRT_END
1198 
1199 
1200 // resolve a static call and patch code
1201 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
1202   methodHandle callee_method;
1203   JRT_BLOCK
1204     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
1205     thread->set_vm_result(callee_method());
1206   JRT_BLOCK_END
1207   // return compiled code entry point after potential safepoints
1208   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1209   return callee_method->verified_code_entry();
1210 JRT_END
1211 
1212 
1213 // resolve virtual call and update inline cache to monomorphic
1214 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
1215   methodHandle callee_method;
1216   JRT_BLOCK
1217     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
1218     thread->set_vm_result(callee_method());
1219   JRT_BLOCK_END
1220   // return compiled code entry point after potential safepoints
1221   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1222   return callee_method->verified_code_entry();
1223 JRT_END
1224 
1225 
1226 // Resolve a virtual call that can be statically bound (e.g., always
1227 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1228 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1229   methodHandle callee_method;
1230   JRT_BLOCK
1231     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1232     thread->set_vm_result(callee_method());
1233   JRT_BLOCK_END
1234   // return compiled code entry point after potential safepoints
1235   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1236   return callee_method->verified_code_entry();
1237 JRT_END
1238 
1239 
1240 
1241 
1242 
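     // Handle an inline cache miss: recompute receiver and call info and, under
     // CompiledIC_lock, update the inline cache to monomorphic or megamorphic as
     // appropriate (or force reresolution for statically bindable targets).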
1243 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1244   ResourceMark rm(thread);
1245   CallInfo call_info;
1246   Bytecodes::Code bc;
1247 
1248   // receiver is NULL for static calls. An exception is thrown for NULL
1249   // receivers for non-static calls
1250   Handle receiver = find_callee_info(thread, bc, call_info,
1251                                      CHECK_(methodHandle()));
1252   // Compiler1 can produce virtual call sites that can actually be statically bound.
1253   // If we fell through to the code below, we would think that the site was going
1254   // megamorphic when in fact it can never miss. Worse, because we'd think it was
1255   // megamorphic we'd try to do a vtable dispatch; however, methods that can be
1256   // statically bound don't have vtable entries (vtable_index < 0) and we'd blow up.
1257   // So we force a reresolution of the call site (as if we did a handle_wrong_method
1258   // and not a plain ic_miss) and the site will be converted to an optimized virtual
1259   // call site, never to miss again. I don't believe C2 will produce code like this,
1260   // but if it did this would still be the correct thing to do for it too, hence no ifdef.
1261   //
1262   if (call_info.resolved_method()->can_be_statically_bound()) {
1263     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
1264     if (TraceCallFixup) {
1265       RegisterMap reg_map(thread, false);
1266       frame caller_frame = thread->last_frame().sender(&reg_map);
1267       ResourceMark rm(thread);
1268       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1269       callee_method->print_short_name(tty);
1270       tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
1271       tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1272     }
1273     return callee_method;
1274   }
1275 
1276   methodHandle callee_method = call_info.selected_method();
1277 
1278   bool should_be_mono = false;
1279 
1280 #ifndef PRODUCT
1281   Atomic::inc(&_ic_miss_ctr);
1282 
1283   // Statistics & Tracing
1284   if (TraceCallFixup) {
1285     ResourceMark rm(thread);
1286     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1287     callee_method->print_short_name(tty);
1288     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1289   }
1290 
1291   if (ICMissHistogram) {
1292     MutexLocker m(VMStatistic_lock);
1293     RegisterMap reg_map(thread, false);
1294     frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
1295     // produce statistics under the lock
1296     trace_ic_miss(f.pc());
1297   }
1298 #endif
1299 
1300   // install an event collector so that when a vtable stub is created the
1301   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1302   // event can't be posted when the stub is created as locks are held
1303   // - instead the event will be deferred until the event collector goes
1304   // out of scope.
1305   JvmtiDynamicCodeEventCollector event_collector;
1306 
1307   // Update inline cache to megamorphic. Skip update if caller has been
1308   // made non-entrant or we are called from interpreted.
1309   { MutexLocker ml_patch (CompiledIC_lock);
1310     RegisterMap reg_map(thread, false);
1311     frame caller_frame = thread->last_frame().sender(&reg_map);
1312     CodeBlob* cb = caller_frame.cb();
1313     if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
1314       // Not a non-entrant nmethod, so find inline_cache
1315       CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
1316       bool should_be_mono = false;
1317       if (inline_cache->is_optimized()) {
1318         if (TraceCallFixup) {
1319           ResourceMark rm(thread);
1320           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1321           callee_method->print_short_name(tty);
1322           tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1323         }
1324         should_be_mono = true;
1325       } else {
1326         compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
1327         if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
1328 
1329           if (receiver()->klass() == ic_oop->holder_klass()) {
1330             // This isn't a real miss. We must have seen that compiled code
1331             // is now available and we want the call site converted to a
1332             // monomorphic compiled call site.
1333             // We can't assert for callee_method->code() != NULL because it
1334             // could have been deoptimized in the meantime
1335             if (TraceCallFixup) {
1336               ResourceMark rm(thread);
1337               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1338               callee_method->print_short_name(tty);
1339               tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1340             }
1341             should_be_mono = true;
1342           }
1343         }
1344       }
1345 
1346       if (should_be_mono) {
1347 
1348         // We have a path that was monomorphic but was going interpreted
1349         // and now we have (or had) a compiled entry. We correct the IC
1350         // by using a new icBuffer.
1351         CompiledICInfo info;
1352         KlassHandle receiver_klass(THREAD, receiver()->klass());
1353         inline_cache->compute_monomorphic_entry(callee_method,
1354                                                 receiver_klass,
1355                                                 inline_cache->is_optimized(),
1356                                                 false,
1357                                                 info, CHECK_(methodHandle()));
1358         inline_cache->set_to_monomorphic(info);
1359       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1360         // Change to megamorphic
1361         inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1362       } else {
1363         // Either clean or megamorphic
1364       }
1365     }
1366   } // Release CompiledIC_lock
1367 
1368   return callee_method;
1369 }
1370 
1371 //
1372 // Resets a call-site in compiled code so it will get resolved again.
1373 // This routine handles virtual call sites, optimized virtual call
1374 // sites, and static call sites. Typically used to change a call site's
1375 // destination from compiled to interpreted.
1376 //
1377 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1378   ResourceMark rm(thread);
1379   RegisterMap reg_map(thread, false);
1380   frame stub_frame = thread->last_frame();
1381   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1382   frame caller = stub_frame.sender(&reg_map);
1383 
1384   // Do nothing if the frame isn't a live compiled frame.
1385   // nmethod could be deoptimized by the time we get here
1386   // so no update to the caller is needed.
1387 
1388   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1389 
1390     address pc = caller.pc();
1391     Events::log("update call-site at pc " INTPTR_FORMAT, pc);
1392 
1393     // Default call_addr is the location of the "basic" call.
1394     // Determine the address of the call we are reresolving. With
1395     // Inline Caches we will always find a recognizable call.
1396     // With Inline Caches disabled we may or may not find a
1397     // recognizable call. We will always find a call for static
1398     // calls and for optimized virtual calls. For vanilla virtual
1399     // calls it depends on the state of the UseInlineCaches switch.
1400     //
1401     // With Inline Caches disabled we can get here for a virtual call
1402     // for two reasons:
1403     //   1 - calling an abstract method. The vtable for abstract methods
1404     //       will run us through handle_wrong_method and we will eventually
1405     //       end up in the interpreter to throw an AbstractMethodError.
1406     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1407     //       call and, between the time we fetch the entry address and
1408     //       the time we jump to it, the target gets deoptimized. Similar to 1,
1409     //       we will wind up in the interpreter (through a c2i with C2).
1410     //
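         // Note: pc here is the return address in the caller frame, so the
         // check below asks whether a recognizable call instruction ends
         // exactly at pc, i.e. the call whose return address this is.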
1411     address call_addr = NULL;
1412     {
1413       // Get call instruction under lock because another thread may be
1414       // busy patching it.
1415       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1416       // Location of call instruction
1417       if (NativeCall::is_call_before(pc)) {
1418         NativeCall *ncall = nativeCall_before(pc);
1419         call_addr = ncall->instruction_address();
1420       }
1421     }
1422 
1423     // Check for static or virtual call
1424     bool is_static_call = false;
1425     nmethod* caller_nm = CodeCache::find_nmethod(pc);
1426     // Make sure nmethod doesn't get deoptimized and removed until
1427     // this is done with it.
1428     // CLEANUP - with lazy deopt shouldn't need this lock
1429     nmethodLocker nmlock(caller_nm);
1430 
1431     if (call_addr != NULL) {
1432       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1433       int ret = iter.next(); // Get item
1434       if (ret) {
1435         assert(iter.addr() == call_addr, "must find call");
1436         if (iter.type() == relocInfo::static_call_type) {
1437           is_static_call = true;
1438         } else {
1439           assert(iter.type() == relocInfo::virtual_call_type ||
1440                  iter.type() == relocInfo::opt_virtual_call_type
1441                 , "unexpected relocInfo type");
1442         }
1443       } else {
1444         assert(!UseInlineCaches, "relocation info must exist for this address");
1445       }
1446 
1447       // Cleaning the inline cache will force a new resolve. This is more robust
1448       // than directly setting it to the new destination, since resolving of calls
1449       // is always done through the same code path. (Experience shows that an
1450       // inline cache updated to a wrong method leads to very hard-to-track-down
1451       // bugs.) It should not be performance critical, since the
1452       // resolve is only done once.
1453 
1454       MutexLocker ml(CompiledIC_lock);
1455       //
1456       // We do not patch the call site if the nmethod has been made non-entrant
1457       // as it is a waste of time
1458       //
1459       if (caller_nm->is_in_use()) {
1460         if (is_static_call) {
1461           CompiledStaticCall* ssc = compiledStaticCall_at(call_addr);
1462           ssc->set_to_clean();
1463         } else {
1464           // compiled, dispatched call (which used to call an interpreted method)
1465           CompiledIC* inline_cache = CompiledIC_at(call_addr);
1466           inline_cache->set_to_clean();
1467         }
1468       }
1469     }
1470 
1471   }
1472 
1473   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1474 
1475 
1476 #ifndef PRODUCT
1477   Atomic::inc(&_wrong_method_ctr);
1478 
1479   if (TraceCallFixup) {
1480     ResourceMark rm(thread);
1481     tty->print("handle_wrong_method reresolving call to");
1482     callee_method->print_short_name(tty);
1483     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1484   }
1485 #endif
1486 
1487   return callee_method;
1488 }
1489 
1490 // ---------------------------------------------------------------------------
1491 // We are calling the interpreter via a c2i. Normally this would mean that
1492 // we were called by a compiled method. However, we could have lost a race
1493 // where we went int -> i2c -> c2i and so the caller could in fact be
1494 // interpreted. If the caller is compiled we attempt to patch the caller
1495 // so it no longer calls into the interpreter.
1496 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
1497   methodOop moop(method);
1498 
1499   address entry_point = moop->from_compiled_entry();
1500 
1501   // It's possible that deoptimization can occur at a call site which hasn't
1502   // been resolved yet, in which case this function will be called from
1503   // an nmethod that has been patched for deopt and we can ignore the
1504   // request for a fixup.
1505   // It is also possible that we lost a race and from_compiled_entry is
1506   // now back to the c2i entry; in that case we don't need to patch, and if
1507   // we did we'd leap into space because the call site needs to use the
1508   // "to interpreter" stub in order to load up the methodOop. Don't
1509   // ask me how I know this...
1510 
1511   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1512   if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
1513     return;
1514   }
1515 
1516   // The check above makes sure this is an nmethod.
1517   nmethod* nm = cb->as_nmethod_or_null();
1518   assert(nm, "must be");
1519 
1520   // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
1521   // to implement MethodHandle actions.
1522   if (nm->is_method_handle_return(caller_pc)) {
1523     return;
1524   }
1525 
1526   // There is a benign race here. We could be attempting to patch to a compiled
1527   // entry point at the same time the callee is being deoptimized. If that is
1528   // the case then entry_point may in fact point to a c2i and we'd patch the
1529   // call site with the same old data. clear_code will set code() to NULL
1530   // at the end of it. If we happen to see that NULL then we can skip trying
1531   // to patch. If we hit the window where the callee has a c2i in the
1532   // from_compiled_entry and the NULL isn't present yet then we lose the race
1533   // and patch the code with the same old data. Such is life.
1534 
1535   if (moop->code() == NULL) return;
1536 
1537   if (nm->is_in_use()) {
1538 
1539     // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
1540     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1541     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
1542       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
1543       //
1544       // bug 6281185. We might get here after resolving a call site to a vanilla
1545       // virtual call. Because the resolvee uses the verified entry it may then
1546       // see compiled code and attempt to patch the site by calling us. This would
1547       // then incorrectly convert the call site to optimized and it's downhill from
1548       // there. If you're lucky you'll get the assert in the bugid, if not you've
1549       // just made a call site that could be megamorphic into a monomorphic site
1550       // for the rest of its life! Just another racing bug in the life of
1551       // fixup_callers_callsite ...
1552       //
1553       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1554       iter.next();
1555       assert(iter.has_current(), "must have a reloc at java call site");
1556       relocInfo::relocType typ = iter.reloc()->type();
1557       if ( typ != relocInfo::static_call_type &&
1558            typ != relocInfo::opt_virtual_call_type &&
1559            typ != relocInfo::static_stub_type) {
1560         return;
1561       }
1562       address destination = call->destination();
1563       if (destination != entry_point) {
1564         CodeBlob* callee = CodeCache::find_blob(destination);
1565         // callee == cb seems weird; it means calling the interpreter through a stub.
1566         if (callee == cb || callee->is_adapter_blob()) {
1567           // static call or optimized virtual
1568           if (TraceCallFixup) {
1569             tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", caller_pc);
1570             moop->print_short_name(tty);
1571             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1572           }
1573           call->set_destination_mt_safe(entry_point);
1574         } else {
1575           if (TraceCallFixup) {
1576             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
1577             moop->print_short_name(tty);
1578             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1579           }
1580           // The assert is too strong; the destination could also be a resolve stub.
1581           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1582         }
1583       } else {
1584           if (TraceCallFixup) {
1585             tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
1586             moop->print_short_name(tty);
1587             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1588           }
1589       }
1590     }
1591   }
1592 
1593 IRT_END
1594 
1595 
1596 // same as JVM_Arraycopy, but called directly from compiled code
1597 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1598                                                 oopDesc* dest, jint dest_pos,
1599                                                 jint length,
1600                                                 JavaThread* thread)) {
1601 #ifndef PRODUCT
1602   _slow_array_copy_ctr++;
1603 #endif
1604   // Check if we have null pointers
1605   if (src == NULL || dest == NULL) {
1606     THROW(vmSymbols::java_lang_NullPointerException());
1607   }
1608   // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
1609   // even though the copy_array API also performs dynamic checks to ensure
1610   // that src and dest are truly arrays (and are conformable).
1611   // The copy_array mechanism is awkward and could be removed, but
1612   // the compilers don't call this function except as a last resort,
1613   // so it probably doesn't matter.
1614   Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
1615                                         (arrayOopDesc*)dest, dest_pos,
1616                                         length, thread);
1617 }
1618 JRT_END
1619 
1620 char* SharedRuntime::generate_class_cast_message(
1621     JavaThread* thread, const char* objName) {
1622 
1623   // Get target class name from the checkcast instruction
1624   vframeStream vfst(thread, true);
1625   assert(!vfst.at_end(), "Java frame must exist");
1626   Bytecode_checkcast* cc = Bytecode_checkcast_at(
1627     vfst.method()->bcp_from(vfst.bci()));
1628   Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
1629     cc->index(), thread));
1630   return generate_class_cast_message(objName, targetKlass->external_name());
1631 }
1632 
1633 char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
1634                                                         oopDesc* required,
1635                                                         oopDesc* actual) {
1636   if (TraceMethodHandles) {
1637     tty->print_cr("WrongMethodType thread=" PTR_FORMAT " req=" PTR_FORMAT " act=" PTR_FORMAT,
1638                   thread, required, actual);
1639   }
1640   assert(EnableMethodHandles, "");
1641   oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
1642   char* message = NULL;
1643   if (singleKlass != NULL) {
1644     const char* objName = "argument or return value";
1645     if (actual != NULL) {
1646       // be flexible about the junk passed in:
1647       klassOop ak = (actual->is_klass()
1648                      ? (klassOop)actual
1649                      : actual->klass());
1650       objName = Klass::cast(ak)->external_name();
1651     }
1652     Klass* targetKlass = Klass::cast(required->is_klass()
1653                                      ? (klassOop)required
1654                                      : java_lang_Class::as_klassOop(required));
1655     message = generate_class_cast_message(objName, targetKlass->external_name());
1656   } else {
1657     // %%% need to get the MethodType string, without messing around too much
1658     // Get a signature from the invoke instruction
1659     const char* mhName = "method handle";
1660     const char* targetType = "the required signature";
1661     vframeStream vfst(thread, true);
1662     if (!vfst.at_end()) {
1663       Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci());
1664       methodHandle target;
1665       {
1666         EXCEPTION_MARK;
1667         target = call->static_target(THREAD);
1668         if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
1669       }
1670       if (target.not_null()
1671           && target->is_method_handle_invoke()
1672           && required == target->method_handle_type()) {
1673         targetType = target->signature()->as_C_string();
1674       }
1675     }
1676     klassOop kignore; int fignore;
1677     methodOop actual_method = MethodHandles::decode_method(actual,
1678                                                           kignore, fignore);
1679     if (actual_method != NULL) {
1680       if (methodOopDesc::is_method_handle_invoke_name(actual_method->name()))
1681         mhName = "$";
1682       else
1683         mhName = actual_method->signature()->as_C_string();
1684       if (mhName[0] == '$')
1685         mhName = actual_method->signature()->as_C_string();
1686     }
1687     message = generate_class_cast_message(mhName, targetType,
1688                                           " cannot be called as ");
1689   }
1690   if (TraceMethodHandles) {
1691     tty->print_cr("WrongMethodType => message=%s", message);
1692   }
1693   return message;
1694 }
1695 
1696 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
1697                                                             oopDesc* required) {
1698   if (required == NULL)  return NULL;
1699   if (required->klass() == SystemDictionary::Class_klass())
1700     return required;
1701   if (required->is_klass())
1702     return Klass::cast(klassOop(required))->java_mirror();
1703   return NULL;
1704 }
1705 
1706 
1707 char* SharedRuntime::generate_class_cast_message(
1708     const char* objName, const char* targetKlassName, const char* desc) {
1709   size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
1710 
1711   char* message = NEW_RESOURCE_ARRAY(char, msglen);
1712   if (NULL == message) {
1713     // Shouldn't happen, but don't cause even more problems if it does
1714     message = const_cast<char*>(objName);
1715   } else {
1716     jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
1717   }
1718   return message;
1719 }
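
     // Illustrative note (not part of the original sources): the message is
     // simply objName + desc + targetKlassName. Assuming the default desc
     // declared in sharedRuntime.hpp is " cannot be cast to ", a failed
     // checkcast of a java.lang.String to a java.lang.Integer would yield:
     //   "java.lang.String cannot be cast to java.lang.Integer"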
1720 
1721 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1722   (void) JavaThread::current()->reguard_stack();
1723 JRT_END
1724 
1725 
1726 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
1727 #ifndef PRODUCT
1728 int SharedRuntime::_monitor_enter_ctr=0;
1729 #endif
1730 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
1731   oop obj(_obj);
1732 #ifndef PRODUCT
1733   _monitor_enter_ctr++;             // monitor enter slow
1734 #endif
1735   if (PrintBiasedLockingStatistics) {
1736     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
1737   }
1738   Handle h_obj(THREAD, obj);
1739   if (UseBiasedLocking) {
1740     // Retry fast entry if bias is revoked to avoid unnecessary inflation
1741     ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
1742   } else {
1743     ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
1744   }
1745   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
1746 JRT_END
1747 
1748 #ifndef PRODUCT
1749 int SharedRuntime::_monitor_exit_ctr=0;
1750 #endif
1751 // Handles the uncommon cases of monitor unlocking in compiled code
1752 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
1753    oop obj(_obj);
1754 #ifndef PRODUCT
1755   _monitor_exit_ctr++;              // monitor exit slow
1756 #endif
1757   Thread* THREAD = JavaThread::current();
1758   // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
1759   // testing was never able to fire the assert that guarded it, so it is compiled out below.
1760   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
1761 #undef MIGHT_HAVE_PENDING
1762 #ifdef MIGHT_HAVE_PENDING
1763   // Save and restore any pending_exception around the exception mark.
1764   // While the slow_exit must not throw an exception, we could come into
1765   // this routine with one set.
1766   oop pending_excep = NULL;
1767   const char* pending_file;
1768   int pending_line;
1769   if (HAS_PENDING_EXCEPTION) {
1770     pending_excep = PENDING_EXCEPTION;
1771     pending_file  = THREAD->exception_file();
1772     pending_line  = THREAD->exception_line();
1773     CLEAR_PENDING_EXCEPTION;
1774   }
1775 #endif /* MIGHT_HAVE_PENDING */
1776 
1777   {
1778     // Exit must be non-blocking, and therefore no exceptions can be thrown.
1779     EXCEPTION_MARK;
1780     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
1781   }
1782 
1783 #ifdef MIGHT_HAVE_PENDING
1784   if (pending_excep != NULL) {
1785     THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
1786   }
1787 #endif /* MIGHT_HAVE_PENDING */
1788 JRT_END
1789 
1790 #ifndef PRODUCT
1791 
1792 void SharedRuntime::print_statistics() {
1793   ttyLocker ttyl;
1794   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
1795 
1796   if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
1797   if (_monitor_exit_ctr  ) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
1798   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
1799 
1800   SharedRuntime::print_ic_miss_histogram();
1801 
1802   if (CountRemovableExceptions) {
1803     if (_nof_removable_exceptions > 0) {
1804       Unimplemented(); // this counter is not yet incremented
1805       tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
1806     }
1807   }
1808 
1809   // Dump the JRT_ENTRY counters
1810   if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
1811   if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
1812   if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
1813   if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
1814   if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
1815   if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
1816   if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
1817 
1818   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
1819   tty->print_cr("%5d wrong method", _wrong_method_ctr );
1820   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
1821   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
1822   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
1823 
1824   if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
1825   if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
1826   if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
1827   if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
1828   if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
1829   if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
1830   if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
1831   if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
1832   if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
1833   if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
1834   if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
1835   if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
1836   if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
1837   if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
1838   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
1839   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
1840 
1841   AdapterHandlerLibrary::print_statistics();
1842 
1843   if (xtty != NULL)  xtty->tail("statistics");
1844 }
1845 
1846 inline double percent(int x, int y) {
1847   return 100.0 * x / MAX2(y, 1);
1848 }
1849 
1850 class MethodArityHistogram {
1851  public:
1852   enum { MAX_ARITY = 256 };
1853  private:
1854   static int _arity_histogram[MAX_ARITY];     // histogram of #args
1855   static int _size_histogram[MAX_ARITY];      // histogram of arg size in words
1856   static int _max_arity;                      // max. arity seen
1857   static int _max_size;                       // max. arg size seen
1858 
1859   static void add_method_to_histogram(nmethod* nm) {
1860     methodOop m = nm->method();
1861     ArgumentCount args(m->signature());
1862     int arity   = args.size() + (m->is_static() ? 0 : 1);
1863     int argsize = m->size_of_parameters();
1864     arity   = MIN2(arity, MAX_ARITY-1);
1865     argsize = MIN2(argsize, MAX_ARITY-1);
1866     int count = nm->method()->compiled_invocation_count();
1867     _arity_histogram[arity]  += count;
1868     _size_histogram[argsize] += count;
1869     _max_arity = MAX2(_max_arity, arity);
1870     _max_size  = MAX2(_max_size, argsize);
1871   }
1872 
1873   void print_histogram_helper(int n, int* histo, const char* name) {
1874     const int N = MIN2(5, n);
1876     double sum = 0;
1877     double weighted_sum = 0;
1878     int i;
1879     for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
1880     double rest = sum;
1881     double percent = sum / 100;
1882     for (i = 0; i <= N; i++) {
1883       rest -= histo[i];
1884       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
1885     }
1886     tty->print_cr("rest: %7d (%5.1f%%)", (int)rest, rest / percent);
1887     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
1888   }
1889 
1890   void print_histogram() {
1891     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1892     print_histogram_helper(_max_arity, _arity_histogram, "arity");
1893     tty->print_cr("\nSame for parameter size (in words):");
1894     print_histogram_helper(_max_size, _size_histogram, "size");
1895     tty->cr();
1896   }
1897 
1898  public:
1899   MethodArityHistogram() {
1900     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1901     _max_arity = _max_size = 0;
1902     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
1903     CodeCache::nmethods_do(add_method_to_histogram);
1904     print_histogram();
1905   }
1906 };
1907 
1908 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
1909 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
1910 int MethodArityHistogram::_max_arity;
1911 int MethodArityHistogram::_max_size;
1912 
1913 void SharedRuntime::print_call_statistics(int comp_total) {
1914   tty->print_cr("Calls from compiled code:");
1915   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
1916   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
1917   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
1918   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
1919   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
1920   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
1921   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
1922   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
1923   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
1924   tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
1925   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
1926   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
1927   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
1928   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
1929   tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
1930   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
1931   tty->cr();
1932   tty->print_cr("Note 1: counter updates are not MT-safe.");
1933   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
1934   tty->print_cr("        %% in nested categories are relative to their category");
1935   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
1936   tty->cr();
1937 
1938   MethodArityHistogram h;
1939 }
1940 #endif
1941 
1942 
1943 // A simple wrapper class around the calling convention information
1944 // that allows sharing of adapters for the same calling convention.
1945 class AdapterFingerPrint : public CHeapObj {
1946  private:
1947   union {
1948     int  _compact[3];
1949     int* _fingerprint;
1950   } _value;
1951   int _length; // A negative length indicates the fingerprint is in the compact form;
1952                // otherwise _value._fingerprint points to the heap-allocated array.
1953 
1954   // Remap BasicTypes that are handled equivalently by the adapters.
1955   // These are correct for the current system but someday it might be
1956   // necessary to make this mapping platform dependent.
1957   static BasicType adapter_encoding(BasicType in) {
1958     assert((~0xf & in) == 0, "must fit in 4 bits");
1959     switch(in) {
1960       case T_BOOLEAN:
1961       case T_BYTE:
1962       case T_SHORT:
1963       case T_CHAR:
1964         // These are all promoted to T_INT in the calling convention
1965         return T_INT;
1966 
1967       case T_OBJECT:
1968       case T_ARRAY:
1969 #ifdef _LP64
1970         return T_LONG;
1971 #else
1972         return T_INT;
1973 #endif
1974 
1975       case T_INT:
1976       case T_LONG:
1977       case T_FLOAT:
1978       case T_DOUBLE:
1979       case T_VOID:
1980         return in;
1981 
1982       default:
1983         ShouldNotReachHere();
1984         return T_CONFLICT;
1985     }
1986   }
1987 
1988  public:
1989   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
1990     // The fingerprint is based on the BasicType signature encoded
1991     // into an array of ints with four entries per int.
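         //
         // Illustrative example (not from the original sources), assuming the
         // usual HotSpot BasicType values (T_INT == 10, T_LONG == 11,
         // T_VOID == 14): a static method taking (int, long) has
         // sig_bt = { T_INT, T_LONG, T_VOID } and total_args_passed == 3, so
         // len == 1 and the compact form is used. The packing loop below
         // produces the single word 0xabe, and compute_hash() of a one-word
         // fingerprint is simply that word. Because adapter_encoding() maps
         // T_BOOLEAN..T_CHAR to T_INT, e.g. (Z)V and (I)V methods end up
         // sharing the same adapter.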
1992     int* ptr;
1993     int len = (total_args_passed + 3) >> 2;
1994     if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
1995       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
1996       // Storing the signature encoded as signed chars hits about 98%
1997       // of the time.
1998       _length = -len;
1999       ptr = _value._compact;
2000     } else {
2001       _length = len;
2002       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
2003       ptr = _value._fingerprint;
2004     }
2005 
2006     // Now pack the BasicTypes with 4 per int
2007     int sig_index = 0;
2008     for (int index = 0; index < len; index++) {
2009       int value = 0;
2010       for (int byte = 0; byte < 4; byte++) {
2011         if (sig_index < total_args_passed) {
2012           value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
2013         }
2014       }
2015       ptr[index] = value;
2016     }
2017   }
2018 
2019   ~AdapterFingerPrint() {
2020     if (_length > 0) {
2021       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2022     }
2023   }
2024 
2025   int value(int index) {
2026     if (_length < 0) {
2027       return _value._compact[index];
2028     }
2029     return _value._fingerprint[index];
2030   }
2031   int length() {
2032     if (_length < 0) return -_length;
2033     return _length;
2034   }
2035 
2036   bool is_compact() {
2037     return _length <= 0;
2038   }
2039 
2040   unsigned int compute_hash() {
2041     int hash = 0;
2042     for (int i = 0; i < length(); i++) {
2043       int v = value(i);
2044       hash = (hash << 8) ^ v ^ (hash >> 5);
2045     }
2046     return (unsigned int)hash;
2047   }
2048 
2049   const char* as_string() {
2050     stringStream st;
2051     for (int i = 0; i < length(); i++) {
2052       st.print(PTR_FORMAT, value(i));
2053     }
2054     return st.as_string();
2055   }
2056 
2057   bool equals(AdapterFingerPrint* other) {
2058     if (other->_length != _length) {
2059       return false;
2060     }
2061     if (_length < 0) {
2062       return _value._compact[0] == other->_value._compact[0] &&
2063              _value._compact[1] == other->_value._compact[1] &&
2064              _value._compact[2] == other->_value._compact[2];
2065     } else {
2066       for (int i = 0; i < _length; i++) {
2067         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2068           return false;
2069         }
2070       }
2071     }
2072     return true;
2073   }
2074 };
2075 
2076 
2077 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2078 class AdapterHandlerTable : public BasicHashtable {
2079   friend class AdapterHandlerTableIterator;
2080 
2081  private:
2082 
2083 #ifndef PRODUCT
2084   static int _lookups; // number of calls to lookup
2085   static int _buckets; // number of buckets checked
2086   static int _equals;  // number of buckets checked with matching hash
2087   static int _hits;    // number of successful lookups
2088   static int _compact; // number of equals calls with compact signature
2089 #endif
2090 
2091   AdapterHandlerEntry* bucket(int i) {
2092     return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
2093   }
2094 
2095  public:
2096   AdapterHandlerTable()
2097     : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
2098 
2099   // Create a new entry suitable for insertion in the table
2100   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
2101     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
2102     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2103     return entry;
2104   }
2105 
2106   // Insert an entry into the table
2107   void add(AdapterHandlerEntry* entry) {
2108     int index = hash_to_index(entry->hash());
2109     add_entry(index, entry);
2110   }
2111 
2112   void free_entry(AdapterHandlerEntry* entry) {
2113     entry->deallocate();
2114     BasicHashtable::free_entry(entry);
2115   }
2116 
2117   // Find an entry with the same fingerprint, if it exists
2118   AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2119     NOT_PRODUCT(_lookups++);
2120     AdapterFingerPrint fp(total_args_passed, sig_bt);
2121     unsigned int hash = fp.compute_hash();
2122     int index = hash_to_index(hash);
2123     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2124       NOT_PRODUCT(_buckets++);
2125       if (e->hash() == hash) {
2126         NOT_PRODUCT(_equals++);
2127         if (fp.equals(e->fingerprint())) {
2128 #ifndef PRODUCT
2129           if (fp.is_compact()) _compact++;
2130           _hits++;
2131 #endif
2132           return e;
2133         }
2134       }
2135     }
2136     return NULL;
2137   }
2138 
2139 #ifndef PRODUCT
2140   void print_statistics() {
2141     ResourceMark rm;
2142     int longest = 0;
2143     int empty = 0;
2144     int total = 0;
2145     int nonempty = 0;
2146     for (int index = 0; index < table_size(); index++) {
2147       int count = 0;
2148       for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2149         count++;
2150       }
2151       if (count != 0) nonempty++;
2152       if (count == 0) empty++;
2153       if (count > longest) longest = count;
2154       total += count;
2155     }
2156     tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
2157                   empty, longest, total, total / (double)nonempty);
2158     tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
2159                   _lookups, _buckets, _equals, _hits, _compact);
2160   }
2161 #endif
2162 };
2163 
2164 
2165 #ifndef PRODUCT
2166 
2167 int AdapterHandlerTable::_lookups;
2168 int AdapterHandlerTable::_buckets;
2169 int AdapterHandlerTable::_equals;
2170 int AdapterHandlerTable::_hits;
2171 int AdapterHandlerTable::_compact;
2172 
2173 #endif
2174 
2175 class AdapterHandlerTableIterator : public StackObj {
2176  private:
2177   AdapterHandlerTable* _table;
2178   int _index;
2179   AdapterHandlerEntry* _current;
2180 
2181   void scan() {
2182     while (_index < _table->table_size()) {
2183       AdapterHandlerEntry* a = _table->bucket(_index);
2184       _index++;
2185       if (a != NULL) {
2186         _current = a;
2187         return;
2188       }
2189     }
2190   }
2191 
2192  public:
2193   AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
2194     scan();
2195   }
2196   bool has_next() {
2197     return _current != NULL;
2198   }
2199   AdapterHandlerEntry* next() {
2200     if (_current != NULL) {
2201       AdapterHandlerEntry* result = _current;
2202       _current = _current->next();
2203       if (_current == NULL) scan();
2204       return result;
2205     } else {
2206       return NULL;
2207     }
2208   }
2209 };
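
     // A minimal usage sketch (illustrative only, not from the original
     // sources): walk every entry currently in the adapter table.
     //
     //   AdapterHandlerTableIterator iter(_adapters);
     //   while (iter.has_next()) {
     //     AdapterHandlerEntry* a = iter.next();
     //     // ... inspect or print 'a' ...
     //   }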
2210 
2211 
2212 // ---------------------------------------------------------------------------
2213 // Implementation of AdapterHandlerLibrary
2214 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
2215 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2216 const int AdapterHandlerLibrary_size = 16*K;
2217 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2218 
2219 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2220   // Should be called only when AdapterHandlerLibrary_lock is active.
2221   if (_buffer == NULL) // Initialize lazily
2222       _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2223   return _buffer;
2224 }
2225 
2226 void AdapterHandlerLibrary::initialize() {
2227   if (_adapters != NULL) return;
2228   _adapters = new AdapterHandlerTable();
2229 
2230   // Create a special handler for abstract methods.  Abstract methods
2231   // are never compiled so an i2c entry is somewhat meaningless, but
2232   // fill it in with something appropriate just in case.  Pass the
2233   // handle-wrong-method stub for the c2i transitions.
2234   address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
2235   _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2236                                                               StubRoutines::throw_AbstractMethodError_entry(),
2237                                                               wrong_method, wrong_method);
2238 }
2239 
2240 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2241                                                       address i2c_entry,
2242                                                       address c2i_entry,
2243                                                       address c2i_unverified_entry) {
2244   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2245 }
2246 
2247 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
2248   // Use customized signature handler.  Need to lock around updates to
2249   // the AdapterHandlerTable (it is not safe for concurrent readers
2250   // and a single writer: this could be fixed if it becomes a
2251   // problem).
2252 
2253   // Get the address of the ic_miss handlers before we grab the
2254   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
2255   // was caused by the initialization of the stubs happening
2256   // while we held the lock and then notifying jvmti while
2257   // holding it. This just forces the initialization to be a little
2258   // earlier.
2259   address ic_miss = SharedRuntime::get_ic_miss_stub();
2260   assert(ic_miss != NULL, "must have handler");
2261 
2262   ResourceMark rm;
2263 
2264   NOT_PRODUCT(int insts_size);
2265   AdapterBlob* B = NULL;
2266   AdapterHandlerEntry* entry = NULL;
2267   AdapterFingerPrint* fingerprint = NULL;
2268   {
2269     MutexLocker mu(AdapterHandlerLibrary_lock);
2270     // make sure data structure is initialized
2271     initialize();
2272 
2273     if (method->is_abstract()) {
2274       return _abstract_method_handler;
2275     }
2276 
2277     // Fill in the signature array, for the calling-convention call.
2278     int total_args_passed = method->size_of_parameters(); // All args on stack
2279 
2280     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2281     VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2282     int i = 0;
2283     if (!method->is_static())  // Pass in receiver first
2284       sig_bt[i++] = T_OBJECT;
2285     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2286       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
2287       if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2288         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2289     }
2290     assert(i == total_args_passed, "");
2291 
2292     // Lookup method signature's fingerprint
2293     entry = _adapters->lookup(total_args_passed, sig_bt);
2294 
2295 #ifdef ASSERT
2296     AdapterHandlerEntry* shared_entry = NULL;
2297     if (VerifyAdapterSharing && entry != NULL) {
2298       shared_entry = entry;
2299       entry = NULL;
2300     }
2301 #endif
2302 
2303     if (entry != NULL) {
2304       return entry;
2305     }
2306 
2307     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2308     int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
2309 
2310     // Make a C heap allocated version of the fingerprint to store in the adapter
2311     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2312 
2313     // Create I2C & C2I handlers
2314 
2315     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2316     if (buf != NULL) {
2317       CodeBuffer buffer(buf);
2318       short buffer_locs[20];
2319       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2320                                              sizeof(buffer_locs)/sizeof(relocInfo));
2321       MacroAssembler _masm(&buffer);
2322 
2323       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2324                                                      total_args_passed,
2325                                                      comp_args_on_stack,
2326                                                      sig_bt,
2327                                                      regs,
2328                                                      fingerprint);
2329 
2330 #ifdef ASSERT
2331       if (VerifyAdapterSharing) {
2332         if (shared_entry != NULL) {
2333           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
2334                  "code must match");
2335           // Release the one just created and return the original
2336           _adapters->free_entry(entry);
2337           return shared_entry;
2338         } else  {
2339           entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
2340         }
2341       }
2342 #endif
2343 
2344       B = AdapterBlob::create(&buffer);
2345       NOT_PRODUCT(insts_size = buffer.insts_size());
2346     }
2347     if (B == NULL) {
2348       // CodeCache is full, disable compilation
2349       // Ought to log this, but the compile log is only per compile thread
2350       // and we're some nondescript Java thread.
2351       MutexUnlocker mu(AdapterHandlerLibrary_lock);
2352       CompileBroker::handle_full_code_cache();
2353       return NULL; // Out of CodeCache space
2354     }
2355     entry->relocate(B->content_begin());
2356 #ifndef PRODUCT
2357     // debugging support
2358     if (PrintAdapterHandlers) {
2359       tty->cr();
2360       tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
2361                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2362                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size );
2363       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
2364       Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size);
2365     }
2366 #endif
2367 
2368     _adapters->add(entry);
2369   }
2370   // Outside of the lock
2371   if (B != NULL) {
2372     char blob_id[256];
2373     jio_snprintf(blob_id,
2374                  sizeof(blob_id),
2375                  "%s(%s)@" PTR_FORMAT,
2376                  B->name(),
2377                  fingerprint->as_string(),
2378                  B->content_begin());
2379     Forte::register_stub(blob_id, B->content_begin(), B->content_end());
2380 
2381     if (JvmtiExport::should_post_dynamic_code_generated()) {
2382       JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
2383     }
2384   }
2385   return entry;
2386 }
2387 
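     // The adapter code is generated into a temporary buffer and then copied
     // into its final AdapterBlob (see get_adapter above) with its layout
     // preserved, so all three entry points shift by the same delta.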
2388 void AdapterHandlerEntry::relocate(address new_base) {
2389     ptrdiff_t delta = new_base - _i2c_entry;
2390     _i2c_entry += delta;
2391     _c2i_entry += delta;
2392     _c2i_unverified_entry += delta;
2393 }
2394 
2395 
2396 void AdapterHandlerEntry::deallocate() {
2397   delete _fingerprint;
2398 #ifdef ASSERT
2399   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2400   if (_saved_sig)  FREE_C_HEAP_ARRAY(BasicType, _saved_sig);
2401 #endif
2402 }
2403 
2404 
2405 #ifdef ASSERT
2406 // Capture the code before relocation so that it can be compared
2407 // against other versions.  If the code is captured after relocation
2408 // then relative instructions won't be equivalent.
2409 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
2410   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
2411   _code_length = length;
2412   memcpy(_saved_code, buffer, length);
2413   _total_args_passed = total_args_passed;
2414   _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
2415   memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
2416 }
2417 
2418 
2419 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
2420   if (length != _code_length) {
2421     return false;
2422   }
2423   for (int i = 0; i < length; i++) {
2424     if (buffer[i] != _saved_code[i]) {
2425       return false;
2426     }
2427   }
2428   return true;
2429 }
2430 #endif
2431 
2432 
2433 // Create a native wrapper for this native method.  The wrapper converts the
2434 // java compiled calling convention to the native convention, handlizes
2435 // arguments, and transitions to native.  On return from the native we transition
2436 // back to java blocking if a safepoint is in progress.
2437 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
2438   ResourceMark rm;
2439   nmethod* nm = NULL;
2440 
2441   if (PrintCompilation) {
2442     ttyLocker ttyl;
2443     tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
2444     method->print_short_name(tty);
2445     if (method->is_static()) {
2446       tty->print(" (static)");
2447     }
2448     tty->cr();
2449   }
2450 
2451   assert(method->has_native_function(), "must have something valid to call!");
2452 
2453   {
2454     // perform the work while holding the lock, but perform any printing outside the lock
2455     MutexLocker mu(AdapterHandlerLibrary_lock);
2456     // See if somebody beat us to it
2457     nm = method->code();
2458     if (nm) {
2459       return nm;
2460     }
2461 
2462     ResourceMark rm;
2463 
2464     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
2465     if (buf != NULL) {
2466       CodeBuffer buffer(buf);
2467       double locs_buf[20];
2468       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2469       MacroAssembler _masm(&buffer);
2470 
2471       // Fill in the signature array, for the calling-convention call.
2472       int total_args_passed = method->size_of_parameters();
2473 
2474       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
2475       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed);
2476       int i=0;
2477       if( !method->is_static() )  // Pass in receiver first
2478         sig_bt[i++] = T_OBJECT;
2479       SignatureStream ss(method->signature());
2480       for( ; !ss.at_return_type(); ss.next()) {
2481         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
2482         if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
2483           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2484       }
2485       assert( i==total_args_passed, "" );
2486       BasicType ret_type = ss.type();
2487 
2488       // Now get the compiled-Java layout as input arguments
2489       int comp_args_on_stack;
2490       comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
2491 
2492       // Generate the compiled-to-native wrapper code
2493       nm = SharedRuntime::generate_native_wrapper(&_masm,
2494                                                   method,
2495                                                   total_args_passed,
2496                                                   comp_args_on_stack,
2497                                                   sig_bt,regs,
2498                                                   ret_type);
2499     }
2500   }
2501 
2502   // Must unlock before calling set_code
2503 
2504   // Install the generated code.
2505   if (nm != NULL) {
2506     method->set_code(method, nm);
2507     nm->post_compiled_method_load_event();
2508   } else {
2509     // CodeCache is full, disable compilation
2510     CompileBroker::handle_full_code_cache();
2511   }
2512   return nm;
2513 }
2514 
2515 #ifdef HAVE_DTRACE_H
2516 // Create a dtrace nmethod for this method.  The wrapper converts the
2517 // java compiled calling convention to the native convention, makes a dummy call
2518 // (actually nops for the size of the call instruction, which become a trap if
2519 // the probe is enabled). Then it returns to the caller. Since this all looks
2520 // like a leaf call, no thread transition is needed.
2521 
2522 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
2523   ResourceMark rm;
2524   nmethod* nm = NULL;
2525 
2526   if (PrintCompilation) {
2527     ttyLocker ttyl;
2528     tty->print("---   n%s  ", (method->is_synchronized() ? "s" : " "));
2529     method->print_short_name(tty);
2530     if (method->is_static()) {
2531       tty->print(" (static)");
2532     }
2533     tty->cr();
2534   }
2535 
2536   {
2537     // perform the work while holding the lock, but perform any printing
2538     // outside the lock
2539     MutexLocker mu(AdapterHandlerLibrary_lock);
2540     // See if somebody beat us to it
2541     nm = method->code();
2542     if (nm) {
2543       return nm;
2544     }
2545 
2546     ResourceMark rm;
2547 
2548     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
2549     if (buf != NULL) {
2550       CodeBuffer buffer(buf);
2551       // Need a few relocation entries
2552       double locs_buf[20];
2553       buffer.insts()->initialize_shared_locs(
2554         (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2555       MacroAssembler _masm(&buffer);
2556 
2557       // Generate the compiled-to-native wrapper code
2558       nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
2559     }
2560   }
2561   return nm;
2562 }
2563 
2564 // The dtrace method needs to convert a java.lang.String to a UTF-8 string.
2565 void SharedRuntime::get_utf(oopDesc* src, address dst) {
2566   typeArrayOop jlsValue  = java_lang_String::value(src);
2567   int          jlsOffset = java_lang_String::offset(src);
2568   int          jlsLen    = java_lang_String::length(src);
2569   jchar*       jlsPos    = (jlsLen == 0) ? NULL :
2570                                            jlsValue->char_at_addr(jlsOffset);
2571   (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
2572 }
2573 #endif // ndef HAVE_DTRACE_H
2574 
2575 // -------------------------------------------------------------------------
2576 // Java-Java calling convention
2577 // (what you use when Java calls Java)
2578 
2579 //------------------------------name_for_receiver----------------------------------
2580 // For a given signature, return the VMReg for parameter 0.
2581 VMReg SharedRuntime::name_for_receiver() {
2582   VMRegPair regs;
2583   BasicType sig_bt = T_OBJECT;
2584   (void) java_calling_convention(&sig_bt, &regs, 1, true);
2585   // Return argument 0 register.  In the LP64 build pointers
2586   // take 2 registers, but the VM wants only the 'main' name.
2587   return regs.first();
2588 }
2589 
2590 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) {
2591   // This method returns a data structure allocated as a
2592   // ResourceObject, so do not put any ResourceMarks in here.
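       //
       // Illustrative example (not from the original sources): for
       // sig = "(ILjava/lang/Object;[[J)V" with has_receiver == true, the
       // loop below yields
       //   sig_bt = { T_OBJECT /*receiver*/, T_INT, T_OBJECT, T_ARRAY }
       // and *arg_size == 4. Arrays occupy a single slot regardless of element
       // type, while a top-level 'J' or 'D' adds a trailing T_VOID filler slot.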
2593   char *s = sig->as_C_string();
2594   int len = (int)strlen(s);
2595   s++; len--;                   // Skip opening paren
2596   char *t = s+len;
2597   while( *(--t) != ')' ) ;      // Find close paren
2598 
2599   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
2600   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
2601   int cnt = 0;
2602   if (has_receiver) {
2603     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2604   }
2605 
2606   while( s < t ) {
2607     switch( *s++ ) {            // Switch on signature character
2608     case 'B': sig_bt[cnt++] = T_BYTE;    break;
2609     case 'C': sig_bt[cnt++] = T_CHAR;    break;
2610     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
2611     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
2612     case 'I': sig_bt[cnt++] = T_INT;     break;
2613     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
2614     case 'S': sig_bt[cnt++] = T_SHORT;   break;
2615     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2616     case 'V': sig_bt[cnt++] = T_VOID;    break;
2617     case 'L':                   // Oop
2618       while( *s++ != ';'  ) ;   // Skip signature
2619       sig_bt[cnt++] = T_OBJECT;
2620       break;
2621     case '[': {                 // Array
2622       do {                      // Skip optional size
2623         while( *s >= '0' && *s <= '9' ) s++;
2624       } while( *s++ == '[' );   // Nested arrays?
2625       // Skip element type
2626       if( s[-1] == 'L' )
2627         while( *s++ != ';'  ) ; // Skip signature
2628       sig_bt[cnt++] = T_ARRAY;
2629       break;
2630     }
2631     default : ShouldNotReachHere();
2632     }
2633   }
2634   assert( cnt < 256, "grow table size" );
2635 
2636   int comp_args_on_stack;
2637   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2638 
2639   // the calling convention doesn't count out_preserve_stack_slots so
2640   // we must add that in to get "true" stack offsets.
2641 
2642   if (comp_args_on_stack) {
2643     for (int i = 0; i < cnt; i++) {
2644       VMReg reg1 = regs[i].first();
2645       if( reg1->is_stack()) {
2646         // Yuck
2647         reg1 = reg1->bias(out_preserve_stack_slots());
2648       }
2649       VMReg reg2 = regs[i].second();
2650       if( reg2->is_stack()) {
2651         // Yuck
2652         reg2 = reg2->bias(out_preserve_stack_slots());
2653       }
2654       regs[i].set_pair(reg2, reg1);
2655     }
2656   }
2657 
2658   // results
2659   *arg_size = cnt;
2660   return regs;
2661 }
2662 
2663 // OSR Migration Code
2664 //
2665 // This code is used to convert interpreter frames into compiled frames.  It is
2666 // called from the very start of a compiled OSR nmethod.  A temp array is
2667 // allocated to hold the interesting bits of the interpreter frame.  All
2668 // active locks are inflated to allow them to move.  The displaced headers and
2669 // active interpreter locals are copied into the temp buffer.  Then we return
2670 // back to the compiled code.  The compiled code then pops the current
2671 // interpreter frame off the stack and pushes a new compiled frame.  Then it
2672 // copies the interpreter locals and displaced headers where it wants.
2673 // Finally it calls back to free the temp buffer.
2674 //
2675 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
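     //
     // As a rough sketch of the control flow: OSR_migration_begin() below builds
     // and returns the temp buffer, and the compiled frame later calls
     // OSR_migration_end() to free it once the values have been copied out.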
2676 
2677 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
2678 
2679 #ifdef IA64
2680   ShouldNotReachHere(); // NYI
2681 #endif /* IA64 */
2682 
2683   //
2684   // This code is dependent on the memory layout of the interpreter local
2685   // array and the monitors. On all of our platforms the layout is identical
2686   // so this code is shared. If some platform lays its arrays out
2687   // differently, then this code could move to platform-specific code, or
2688   // the code here could be modified to copy items one at a time using
2689   // frame accessor methods and be platform independent.
2690 
2691   frame fr = thread->last_frame();
2692   assert( fr.is_interpreted_frame(), "" );
2693   assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
2694 
2695   // Figure out how many monitors are active.
2696   int active_monitor_count = 0;
2697   for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
2698        kptr < fr.interpreter_frame_monitor_begin();
2699        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
2700     if( kptr->obj() != NULL ) active_monitor_count++;
2701   }
2702 
2703   // QQQ we could place the number of active monitors in the array so that
2704   // compiled code could double-check it.
2705 
2706   methodOop moop = fr.interpreter_frame_method();
2707   int max_locals = moop->max_locals();
2708   // Allocate temp buffer, 1 word per local & 2 per active monitor
2709   int buf_size_words = max_locals + active_monitor_count*2;
2710   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
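       // Resulting buffer layout (as filled in below):
       //   buf[0 .. max_locals-1]      raw copy of the interpreter locals
       //   buf[max_locals + 2*k]       displaced header of the k-th active monitor
       //   buf[max_locals + 2*k + 1]   owning object of the k-th active monitor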
2711 
2712   // Copy the locals.  Order is preserved so that loading of longs works.
2713   // Since there's no GC I can copy the oops blindly.
2714   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
2715   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
2716                        (HeapWord*)&buf[0],
2717                        max_locals);
2718 
2719   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
2720   int i = max_locals;
2721   for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
2722        kptr2 < fr.interpreter_frame_monitor_begin();
2723        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
2724     if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
2725       BasicLock *lock = kptr2->lock();
2726       // Inflate so the displaced header becomes position-independent
2727       if (lock->displaced_header()->is_unlocked())
2728         ObjectSynchronizer::inflate_helper(kptr2->obj());
2729       // Now the displaced header is free to move
2730       buf[i++] = (intptr_t)lock->displaced_header();
2731       buf[i++] = (intptr_t)kptr2->obj();
2732     }
2733   }
2734   assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
2735 
2736   return buf;
2737 JRT_END
2738 
2739 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
2740   FREE_C_HEAP_ARRAY(intptr_t,buf);
2741 JRT_END
2742 
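     // Returns true if CodeBlob b contains the i2c entry of some registered
     // adapter.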
2743 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
2744   AdapterHandlerTableIterator iter(_adapters);
2745   while (iter.has_next()) {
2746     AdapterHandlerEntry* a = iter.next();
2747     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
2748   }
2749   return false;
2750 }
2751 
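     // Prints the adapter entry (signature fingerprint and entry points) whose
     // i2c code blob matches b; asserts if no matching handler is found.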
2752 void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
2753   AdapterHandlerTableIterator iter(_adapters);
2754   while (iter.has_next()) {
2755     AdapterHandlerEntry* a = iter.next();
2756     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
2757       st->print("Adapter for signature: ");
2758       st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
2759                    a->fingerprint()->as_string(),
2760                    a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
2761 
2762       return;
2763     }
2764   }
2765   assert(false, "Should have found handler");
2766 }
2767 
2768 #ifndef PRODUCT
2769 
2770 void AdapterHandlerLibrary::print_statistics() {
2771   _adapters->print_statistics();
2772 }
2773 
2774 #endif /* PRODUCT */