/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/codeCacheExtensions.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/klass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Shared stub locations
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2


//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_stubs() {
  _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
  _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
  _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
  _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
  _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
  _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Vectors are generated only by C2 and JVMCI.
  bool support_wide = is_wide_vector(MaxVectorSize);
  if (support_wide) {
    _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
  }
#endif // COMPILER2 || INCLUDE_JVMCI
  _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
  _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}

#include <math.h>

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr = 0;
int SharedRuntime::_new_array_ctr = 0;
int SharedRuntime::_multi1_ctr = 0;
int SharedRuntime::_multi2_ctr = 0;
int SharedRuntime::_multi3_ctr = 0;
int SharedRuntime::_multi4_ctr = 0;
int SharedRuntime::_multi5_ctr = 0;
int SharedRuntime::_mon_enter_stub_ctr = 0;
int SharedRuntime::_mon_exit_stub_ctr = 0;
int SharedRuntime::_mon_enter_ctr = 0;
int SharedRuntime::_mon_exit_ctr = 0;
int SharedRuntime::_partial_subtype_ctr = 0;
int SharedRuntime::_jbyte_array_copy_ctr = 0;
int SharedRuntime::_jshort_array_copy_ctr = 0;
int SharedRuntime::_jint_array_copy_ctr = 0;
int SharedRuntime::_jlong_array_copy_ctr = 0;
int SharedRuntime::_oop_array_copy_ctr = 0;
int SharedRuntime::_checkcast_array_copy_ctr = 0;
int SharedRuntime::_unsafe_array_copy_ctr = 0;
int SharedRuntime::_generic_array_copy_ctr = 0;
int SharedRuntime::_slow_array_copy_ctr = 0;
int SharedRuntime::_find_handler_ctr = 0;
int SharedRuntime::_rethrow_ctr = 0;

int     SharedRuntime::_ICmiss_index                    = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

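// Record an inline-cache miss at the given call site. A site that has been
// seen before just has its counter bumped; new sites are appended until the
// table is full, after which the last slot is reused.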
void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT

#if INCLUDE_ALL_GCS

// G1 write-barrier pre: executed before a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
  if (orig == NULL) {
    assert(false, "should be optimized out");
    return;
  }
  assert(orig->is_oop(true /* ignore mark word */), "Error");
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END

// G1 write-barrier post: executed after a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END

#endif // INCLUDE_ALL_GCS
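
// A conceptual sketch (not the code the compilers actually emit) of how the
// two entry points above bracket a reference store once the inline fast
// paths fail; card_for() here stands in for the card-table address
// computation and is not a real helper:
//
//   g1_wb_pre(*field, thread);            // SATB: remember the old value
//                                         //   (only reached when non-NULL)
//   *field = new_value;                   // the store itself
//   g1_wb_post(card_for(field), thread);  // dirty the card spanning 'field'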


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END


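// Note the (y, x) parameter order in ldiv and lrem below: x is the dividend
// and y the divisor, as the min_jlong overflow checks show.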
JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END


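// Bit masks used below to detect infinity operands: clearing the sign bit of
// an IEEE-754 value and comparing the result with the all-ones-exponent,
// zero-mantissa pattern identifies +/-infinity.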
const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);

JRT_LEAF(jfloat, SharedRuntime::frem(jfloat  x, jfloat  y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if (((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
  return ((jfloat)fmod_winx64((double)x, (double)y));
#else
  return ((jfloat)fmod((double)x, (double)y));
#endif
JRT_END


JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if (((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
  return ((jdouble)fmod_winx64((double)x, (double)y));
#else
  return ((jdouble)fmod((double)x, (double)y));
#endif
JRT_END

#ifdef __SOFTFP__
JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
  return x + y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
  return x - y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
  return x * y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
  return x + y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
  return x - y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
  return x * y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
  return x / y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
  return (jfloat)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
  return (jdouble)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
  return (jdouble)x;
JRT_END

JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
  return x > y ? 1 : (x == y ? 0 : -1);  /* x < y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
  return x < y ? -1 : (x == y ? 0 : 1);  /* x > y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
  return x > y ? 1 : (x == y ? 0 : -1);  /* x < y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
  return x < y ? -1 : (x == y ? 0 : 1);  /* x > y or is_nan */
JRT_END

// Variants of the AEABI comparison helpers that return the opposite result
// when an operand is NaN.
JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

// Intrinsics make gcc generate code for these.
float  SharedRuntime::fneg(float f)   {
  return -f;
}

double SharedRuntime::dneg(double f)  {
  return -f;
}

#endif // __SOFTFP__

#if defined(__SOFTFP__) || defined(E500V2)
// Intrinsics make gcc generate code for these.
double SharedRuntime::dabs(double f)  {
  return (f <= (double)0.0) ? (double)0.0 - f : f;
}

#endif

#if defined(__SOFTFP__) || defined(PPC)
double SharedRuntime::dsqrt(double f) {
  return sqrt(f);
}
#endif

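// The four float/double -> int/long conversions below implement the Java
// narrowing rules (JLS 5.1.3): NaN converts to 0 and out-of-range values
// saturate to the minimum/maximum representable result.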
JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jint)
    return max_jint;
  if (x <= (jfloat) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jlong)
    return max_jlong;
  if (x <= (jfloat) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jint)
    return max_jint;
  if (x <= (jdouble) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jlong)
    return max_jlong;
  if (x <= (jdouble) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame, selected according to the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
  assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
  assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");

  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

#if INCLUDE_JVMCI
  // JVMCI's ExceptionHandlerStub expects the thread-local exception PC to be
  // clear, and the other exception handler continuations do not read it.
  thread->set_exception_pc(NULL);
#endif

  // The fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Set flag if return address is a method handle call site.
    thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      // If we come here because of a stack overflow, the stack may be
      // unguarded. Reguard the stack; otherwise, if we return to the
      // deopt blob and the stack bang causes a stack overflow, we
      // crash.
      bool guard_pages_enabled = thread->stack_guards_enabled();
      if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
      if (thread->reserved_stack_activation() != thread->stack_base()) {
        thread->set_reserved_stack_activation(thread->stack_base());
      }
      assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT

  ShouldNotReachHere();
  return NULL;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
  return raw_exception_handler_for_return_address(thread, return_address);
JRT_END


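// Given the pc of a safepoint poll that just trapped, select which
// SafepointBlob entry point the signal handler should continue at: the
// return-poll handler, the wide-vector variant, or the plain in-loop handler.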
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert(cb && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
    "safepoint polling: type must be poll");

#ifdef ASSERT
  if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
    tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
    Disassembler::decode(cb);
    fatal("Only polling locations are used for safepoint");
  }
#endif

  bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
  bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else if (has_wide_vectors) {
    assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
           "polling page vectors safepoint stub not created yet");
    stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
  log_debug(safepoint)("... found polling page %s exception at pc = "
                       INTPTR_FORMAT ", stub = " INTPTR_FORMAT,
                       at_poll_return ? "return" : "loop",
                       (intptr_t)pc, (intptr_t)stub);
  return stub;
}


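// Fetch the receiver of the call described by 'sig' from the interpreter
// expression stack of 'caller'; the receiver occupies the deepest of the
// call's argument slots.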
oop SharedRuntime::retrieve_receiver(Symbol* sig, frame caller) {
  assert(caller.is_interpreted_frame(), "");
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = cast_to_oop(*caller.interpreter_frame_tos_at(args_size - 1));
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}


void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Symbol* name, const char *message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// The interpreter code that calls this tracing function is only
// called/generated when Unified Logging is enabled for the redefine, class,
// and obsolete tags at trace level. Since obsolete methods are never
// compiled, we don't have to modify the compilers to generate calls to
// this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, Method* method))
  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the Method* from the constant pool.
    ResourceMark rm;
    log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
  }
  return 0;
JRT_END

// ret_pc points into the caller; we are returning the caller's exception
// handler for the given exception.
address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
  assert(cm != NULL, "must exist");
  ResourceMark rm;

#if INCLUDE_JVMCI
  if (cm->is_compiled_by_jvmci()) {
    // lookup exception handler for this pc
    int catch_pco = ret_pc - cm->code_begin();
    ExceptionHandlerTable table(cm);
    HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
    if (t != NULL) {
      return cm->code_begin() + t->pco();
    } else {
      // there is no exception handler for this pc => deoptimize
      cm->make_not_entrant();

      // Use Deoptimization::deoptimize for all of its side effects:
      // revoking biases of monitors, gathering traps statistics, logging...
      // It also patches the return pc, but we do not care about that
      // since we return a continuation to the deopt_blob below.
      JavaThread* thread = JavaThread::current();
      RegisterMap reg_map(thread, UseBiasedLocking);
      frame runtime_frame = thread->last_frame();
      frame caller_frame = runtime_frame.sender(&reg_map);
      Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);

      return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
    }
  }
#endif // INCLUDE_JVMCI

  nmethod* nm = cm->as_nmethod();
  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    bool recursive_exception = false;
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek(THREAD, exception->klass());
      methodHandle mh(THREAD, sd->method());
      handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        recursive_exception = true;
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        recursive_exception_occurred = true;
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      } else {
        recursive_exception = false;
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != NULL));
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->code_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables.  The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions.  In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (t == NULL && nm->is_compiled_by_c1()) {
    assert(nm->unwind_handler_begin() != NULL, "");
    return nm->unwind_handler_begin();
  }
#endif

  if (t == NULL) {
    ttyLocker ttyl;
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", p2i(ret_pc), handler_bci);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table:");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->code_begin() + t->pco();
}

 740 
 741 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
 742   // These errors occur only at call sites
 743   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
 744 JRT_END
 745 
 746 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
 747   // These errors occur only at call sites
 748   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
 749 JRT_END
 750 
 751 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
 752   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 753 JRT_END
 754 
 755 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
 756   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
 757 JRT_END
 758 
 759 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
 760   // This entry point is effectively only used for NullPointerExceptions which occur at inline
 761   // cache sites (when the callee activation is not yet set up) so we are at a call site
 762   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
 763 JRT_END
 764 
 765 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
 766   throw_StackOverflowError_common(thread, false);
 767 JRT_END
 768 
 769 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
 770   throw_StackOverflowError_common(thread, true);
 771 JRT_END
 772 
 773 void SharedRuntime::throw_StackOverflowError_common(JavaThread* thread, bool delayed) {
 774   // We avoid using the normal exception construction in this case because
 775   // it performs an upcall to Java, and we're already out of stack space.
 776   Thread* THREAD = thread;
 777   Klass* k = SystemDictionary::StackOverflowError_klass();
 778   oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
 779   if (delayed) {
 780     java_lang_Throwable::set_message(exception_oop,
 781                                      Universe::delayed_stack_overflow_error_message());
 782   }
 783   Handle exception (thread, exception_oop);
 784   if (StackTraceInThrowable) {
 785     java_lang_Throwable::fill_in_stack_trace(exception);
 786   }
 787   // Increment counter for hs_err file reporting
 788   Atomic::inc(&Exceptions::_stack_overflow_errors);
 789   throw_and_post_jvmti_exception(thread, exception);
 790 }
 791 
 792 #if INCLUDE_JVMCI
 793 address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason) {
 794   assert(deopt_reason > Deoptimization::Reason_none && deopt_reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
 795   thread->set_jvmci_implicit_exception_pc(pc);
 796   thread->set_pending_deoptimization(Deoptimization::make_trap_request((Deoptimization::DeoptReason)deopt_reason, Deoptimization::Action_reinterpret));
 797   return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
 798 }
 799 #endif // INCLUDE_JVMCI
 800 
 801 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
 802                                                            address pc,
 803                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
 804 {
 805   address target_pc = NULL;
 806 
 807   if (Interpreter::contains(pc)) {
 808 #ifdef CC_INTERP
 809     // C++ interpreter doesn't throw implicit exceptions
 810     ShouldNotReachHere();
 811 #else
 812     switch (exception_kind) {
 813       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
 814       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
 815       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
 816       default:                      ShouldNotReachHere();
 817     }
 818 #endif // !CC_INTERP
 819   } else {
 820     switch (exception_kind) {
 821       case STACK_OVERFLOW: {
 822         // Stack overflow only occurs upon frame setup; the callee is
 823         // going to be unwound. Dispatch to a shared runtime stub
 824         // which will cause the StackOverflowError to be fabricated
 825         // and processed.
 826         // Stack overflow should never occur during deoptimization:
 827         // the compiled method bangs the stack by as much as the
 828         // interpreter would need in case of a deoptimization. The
 829         // deoptimization blob and uncommon trap blob bang the stack
 830         // in a debug VM to verify the correctness of the compiled
 831         // method stack banging.
 832         assert(thread->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
 833         Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
 834         return StubRoutines::throw_StackOverflowError_entry();
 835       }
 836 
 837       case IMPLICIT_NULL: {
 838         if (VtableStubs::contains(pc)) {
 839           // We haven't yet entered the callee frame. Fabricate an
 840           // exception and begin dispatching it in the caller. Since
 841           // the caller was at a call site, it's safe to destroy all
 842           // caller-saved registers, as these entry points do.
 843           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
 844 
 845           // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
 846           if (vt_stub == NULL) return NULL;
 847 
 848           if (vt_stub->is_abstract_method_error(pc)) {
 849             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
 850             Events::log_exception(thread, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
 851             return StubRoutines::throw_AbstractMethodError_entry();
 852           } else {
 853             Events::log_exception(thread, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
 854             return StubRoutines::throw_NullPointerException_at_call_entry();
 855           }
 856         } else {
 857           CodeBlob* cb = CodeCache::find_blob(pc);
 858 
 859           // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
 860           if (cb == NULL) return NULL;
 861 
 862           // Exception happened in CodeCache. Must be either:
 863           // 1. Inline-cache check in C2I handler blob,
 864           // 2. Inline-cache check in nmethod, or
 865           // 3. Implicit null exception in nmethod
 866 
 867           if (!cb->is_compiled()) {
 868             bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
 869             if (!is_in_blob) {
 870               // Allow normal crash reporting to handle this
 871               return NULL;
 872             }
 873             Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
 874             // There is no handler here, so we will simply unwind.
 875             return StubRoutines::throw_NullPointerException_at_call_entry();
 876           }
 877 
 878           // Otherwise, it's a compiled method.  Consult its exception handlers.
 879           CompiledMethod* cm = (CompiledMethod*)cb;
 880           if (cm->inlinecache_check_contains(pc)) {
 881             // exception happened inside inline-cache check code
 882             // => the nmethod is not yet active (i.e., the frame
 883             // is not set up yet) => use return address pushed by
 884             // caller => don't push another return address
 885             Events::log_exception(thread, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
 886             return StubRoutines::throw_NullPointerException_at_call_entry();
 887           }
 888 
 889           if (cm->method()->is_method_handle_intrinsic()) {
 890             // exception happened inside MH dispatch code, similar to a vtable stub
 891             Events::log_exception(thread, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
 892             return StubRoutines::throw_NullPointerException_at_call_entry();
 893           }
 894 
 895 #ifndef PRODUCT
 896           _implicit_null_throws++;
 897 #endif
 898 #if INCLUDE_JVMCI
 899           if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
 900             // If there's no PcDesc then we'll die way down inside of
 901             // deopt instead of just getting normal error reporting,
 902             // so only go there if it will succeed.
 903             return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_null_check);
 904           } else {
 905 #endif // INCLUDE_JVMCI
 906           assert (cm->is_nmethod(), "Expect nmethod");
 907           target_pc = ((nmethod*)cm)->continuation_for_implicit_exception(pc);
 908 #if INCLUDE_JVMCI
 909           }
 910 #endif // INCLUDE_JVMCI
 911           // If there's an unexpected fault, target_pc might be NULL,
 912           // in which case we want to fall through into the normal
 913           // error handling code.
 914         }
 915 
 916         break; // fall through
 917       }
 918 
 919 
 920       case IMPLICIT_DIVIDE_BY_ZERO: {
 921         CompiledMethod* cm = CodeCache::find_compiled(pc);
 922         guarantee(cm != NULL, "must have containing compiled method for implicit division-by-zero exceptions");
 923 #ifndef PRODUCT
 924         _implicit_div0_throws++;
 925 #endif
 926 #if INCLUDE_JVMCI
 927         if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
 928           return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_div0_check);
 929         } else {
 930 #endif // INCLUDE_JVMCI
 931         target_pc = cm->continuation_for_implicit_exception(pc);
 932 #if INCLUDE_JVMCI
 933         }
 934 #endif // INCLUDE_JVMCI
 935         // If there's an unexpected fault, target_pc might be NULL,
 936         // in which case we want to fall through into the normal
 937         // error handling code.
 938         break; // fall through
 939       }
 940 
 941       default: ShouldNotReachHere();
 942     }
 943 
 944     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
 945 
 946     if (exception_kind == IMPLICIT_NULL) {
 947 #ifndef PRODUCT
 948       // for AbortVMOnException flag
 949       Exceptions::debug_check_abort("java.lang.NullPointerException");
 950 #endif //PRODUCT
 951       Events::log_exception(thread, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
 952     } else {
 953 #ifndef PRODUCT
 954       // for AbortVMOnException flag
 955       Exceptions::debug_check_abort("java.lang.ArithmeticException");
 956 #endif //PRODUCT
 957       Events::log_exception(thread, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
 958     }
 959     return target_pc;
 960   }
 961 
 962   ShouldNotReachHere();
 963   return NULL;
 964 }
 965 
 966 
/**
 * Throws a java/lang/UnsatisfiedLinkError.  The address of this method is
 * installed in the native function entry of all native Java methods before
 * they get linked to their actual native methods.
 *
 * \note
 * This method actually never gets called!  This is because
 * the interpreter's native entries call NativeLookup::lookup() which
 * throws the exception when the lookup fails.  The exception is then
 * caught and forwarded on the return from NativeLookup::lookup() call
 * before the call to the native function.  This might change in the future.
 */
JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  // We return a bad value here to make sure that the exception is
  // forwarded before we look at the return value.
  THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle);
}
JNI_END

address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}

JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
#if INCLUDE_JVMCI
  // This removes the requirement for JVMCI compilers to emit code
  // performing a dynamic check that obj has a finalizer before
  // calling this routine. There should be no performance impact
  // for C1 since it emits a dynamic check. C2 and the interpreter
  // use other runtime routines for registering finalizers.
  if (!obj->klass()->has_finalizer()) {
    return;
  }
#endif // INCLUDE_JVMCI
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


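// Returns the java.lang.Thread tid of the given thread, or 0 if it is not a
// Java thread or does not have a thread oop yet.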
jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741.  Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o, int size) {
  return dtrace_object_alloc_base(Thread::current(), o, size);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->klass();
  Symbol* name = klass->name();
  HOTSPOT_OBJECT_ALLOC(
                   get_java_tid(thread),
                   (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, Method* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_ENTRY(
      get_java_tid(thread),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, Method* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_RETURN(
      get_java_tid(thread),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., the arguments have been pushed on
// the stack but the callee has not been invoked yet.  Used by: resolve
// virtual/static, vtable updates, etc.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, THREAD);
}

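// Returns the Method* that the compiler attached to the call site just
// before the current frame's pc, if any (e.g. for statically resolved
// MethodHandle intrinsic calls); returns NULL when no method is attached.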
methodHandle SharedRuntime::extract_attached_method(vframeStream& vfst) {
  CompiledMethod* caller = vfst.nm();

  nmethodLocker caller_lock(caller);

  address pc = vfst.frame_pc();
  { // Get call instruction under lock because another thread may be busy patching it.
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    return caller->attached_method_before_pc(pc);
  }
  return NULL;
}

// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., the arguments have been pushed on
// the stack but the callee has not been invoked yet.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // Create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(THREAD, vfst.method());
  int          bci   = vfst.bci();

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();

  methodHandle attached_method = extract_attached_method(vfst);
  if (attached_method.not_null()) {
    methodHandle callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual
    // call, it attaches the statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
      }
    }
  } else {
    bc = bytecode.invoke_code();
  }

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver
    // for compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(thread);
    frame stubFrame   = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    if (attached_method.is_null()) {
      methodHandle callee = bytecode.static_target(CHECK_NH);
      if (callee.is_null()) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }

    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  assert(receiver.is_null() || receiver->is_oop(), "wrong receiver");

  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(THREAD, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass(THREAD, receiver->klass());
    Klass* rk = NULL;
    if (attached_method.not_null()) {
      // In case there's a resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(THREAD, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
    }
    KlassHandle static_receiver_klass(THREAD, rk);
    methodHandle callee = callinfo.selected_method();
    assert(receiver_klass->is_subtype_of(static_receiver_klass()),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass()->print();
      }
      assert(!InstanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

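// Determine the method the current runtime stub was called for: either from
// the entry frame's call wrapper (when no Java frames exist above it) or from
// the call site in the topmost Java frame.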
methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We first need to check if any Java activations (compiled, interpreted)
  // exist on the stack since the last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call, retrying if the target method was redefined while we
// were resolving it.
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::Object_klass()) {
      // If there is a pending exception then there is no need to retry
      // resolving this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of resolution. If we loop here more than 100 times,
      // there is probably a bug.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // The method was redefined in the middle of resolution, so retry.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call.  The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
  CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();

  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock(caller_nm);

  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");

  assert(caller_nm->is_alive(), "It should be alive");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
                (is_virtual) ? (&_resolve_virtual_ctr) :
                               (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
      (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
      Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
                  p2i(caller_frame.pc()), p2i(callee_method->code()));
  }
#endif

  // JSR 292 key invariant:
  // If the resolved method is a MethodHandle invoke target, the call
  // site must be a MethodHandle call site, because the lambda form might tail-call
  // leaving the stack in a state unknown to either caller or callee
  // TODO detune for now but we might need it again
//  assert(!callee_method->is_compiled_lambda_form() ||
//         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call.  We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized.  Return values: For a virtual call this is a
1335   // (cached_oop, destination address) pair. For a static call/optimized
1336   // virtual this is just a destination address.
1337 
1338   StaticCallInfo static_call_info;
1339   CompiledICInfo virtual_call_info;
1340 
1341   // Make sure the callee nmethod does not get deoptimized and removed before
1342   // we are done patching the code.
1343   CompiledMethod* callee = callee_method->code();
1344 
1345   if (callee != NULL) {
1346     assert(callee->is_compiled(), "must be nmethod for patching");
1347   }
1348 
1349   if (callee != NULL && !callee->is_in_use()) {
1350     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1351     callee = NULL;
1352   }
1353   nmethodLocker nl_callee(callee);
1354 #ifdef ASSERT
1355   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1356 #endif
1357 
1358   if (is_virtual) {
1359     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1360     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1361     KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
1362     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
1363                      is_optimized, static_bound, virtual_call_info,
1364                      CHECK_(methodHandle()));
1365   } else {
1366     // static call
1367     CompiledStaticCall::compute_entry(callee_method, static_call_info);
1368   }
1369 
1370   // grab lock, check for deoptimization and potentially patch caller
1371   {
1372     MutexLocker ml_patch(CompiledIC_lock);
1373 
    // Acquiring the lock may block for a safepoint, during which both
    // nmethods can change state.
1375 
    // Now that we are ready to patch: if the Method* was redefined, don't
    // update the call site and let the caller retry.
    // Don't update the call site if the callee nmethod was unloaded or deoptimized.
    // Don't update the call site if the callee nmethod was replaced by another
    // nmethod, which may happen once multiple simultaneously alive nmethods
    // (tiered compilation) are supported.
1382     if (!callee_method->is_old() &&
        (callee == NULL || (callee->is_in_use() && (callee_method->code() == callee)))) {
1384 #ifdef ASSERT
1385       // We must not try to patch to jump to an already unloaded method.
1386       if (dest_entry_point != 0) {
1387         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1388         assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1389                "should not call unloaded nmethod");
1390       }
1391 #endif
1392       if (is_virtual) {
1393         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1394         if (inline_cache->is_clean()) {
1395           inline_cache->set_to_monomorphic(virtual_call_info);
1396         }
1397       } else {
1398         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
1399         if (ssc->is_clean()) ssc->set(static_call_info);
1400       }
1401     }
1402 
1403   } // unlock CompiledIC_lock
1404 
1405   return callee_method;
1406 }
1407 
1408 
1409 // Inline caches exist only in compiled code
1410 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1411 #ifdef ASSERT
1412   RegisterMap reg_map(thread, false);
1413   frame stub_frame = thread->last_frame();
1414   assert(stub_frame.is_runtime_frame(), "sanity check");
1415   frame caller_frame = stub_frame.sender(&reg_map);
1416   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1417 #endif /* ASSERT */
1418 
1419   methodHandle callee_method;
1420   JRT_BLOCK
1421     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1422     // Return Method* through TLS
1423     thread->set_vm_result_2(callee_method());
1424   JRT_BLOCK_END
1425   // return compiled code entry point after potential safepoints
1426   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1427   return callee_method->verified_code_entry();
1428 JRT_END
1429 
1430 
1431 // Handle call site that has been made non-entrant
1432 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up here if the callee is deoptimized as we
  // race to call it.  We don't want to take a safepoint if the caller
  // was interpreted, because the caller frame will look interpreted to
  // the stack walkers while the arguments are already "compiled"; it is
  // much better to make this transition invisible to the stack walking
  // code. The i2c path places the callee method in callee_target. It is
  // stashed there because if we tried to find the callee by normal means
  // a safepoint would be possible and we would have trouble GC'ing the
  // compiled arguments.
1442   RegisterMap reg_map(thread, false);
1443   frame stub_frame = thread->last_frame();
1444   assert(stub_frame.is_runtime_frame(), "sanity check");
1445   frame caller_frame = stub_frame.sender(&reg_map);
1446 
1447   if (caller_frame.is_interpreted_frame() ||
1448       caller_frame.is_entry_frame()) {
1449     Method* callee = thread->callee_target();
1450     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1451     thread->set_vm_result_2(callee);
1452     thread->set_callee_target(NULL);
1453     return callee->get_c2i_entry();
1454   }
1455 
  // Must be the compiled-to-compiled path, which is safe to stackwalk.
1457   methodHandle callee_method;
1458   JRT_BLOCK
    // Force resolving of the caller (if we were called from a compiled frame)
1460     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
1461     thread->set_vm_result_2(callee_method());
1462   JRT_BLOCK_END
1463   // return compiled code entry point after potential safepoints
1464   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1465   return callee_method->verified_code_entry();
1466 JRT_END
1467 
1468 // Handle abstract method call
1469 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
1470   return StubRoutines::throw_AbstractMethodError_entry();
1471 JRT_END
1472 
1473 
1474 // resolve a static call and patch code
1475 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
1476   methodHandle callee_method;
1477   JRT_BLOCK
1478     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
1479     thread->set_vm_result_2(callee_method());
1480   JRT_BLOCK_END
1481   // return compiled code entry point after potential safepoints
1482   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1483   return callee_method->verified_code_entry();
1484 JRT_END
1485 
1486 
1487 // resolve virtual call and update inline cache to monomorphic
1488 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
1489   methodHandle callee_method;
1490   JRT_BLOCK
1491     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
1492     thread->set_vm_result_2(callee_method());
1493   JRT_BLOCK_END
1494   // return compiled code entry point after potential safepoints
1495   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1496   return callee_method->verified_code_entry();
1497 JRT_END
1498 
1499 
1500 // Resolve a virtual call that can be statically bound (e.g., always
1501 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1502 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
1503   methodHandle callee_method;
1504   JRT_BLOCK
1505     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
1506     thread->set_vm_result_2(callee_method());
1507   JRT_BLOCK_END
1508   // return compiled code entry point after potential safepoints
1509   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1510   return callee_method->verified_code_entry();
1511 JRT_END
1512 
1513 
1514 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1515   ResourceMark rm(thread);
1516   CallInfo call_info;
1517   Bytecodes::Code bc;
1518 
1519   // receiver is NULL for static calls. An exception is thrown for NULL
1520   // receivers for non-static calls
1521   Handle receiver = find_callee_info(thread, bc, call_info,
1522                                      CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically bound.
  // If we fell through to the code below, we would think that the site was going
  // megamorphic when in fact it can never miss. Worse, because we'd think it was
  // megamorphic we'd try to do a vtable dispatch, but methods that can be
  // statically bound don't have vtable entries (vtable_index < 0) and we'd blow up.
  // So we force a reresolution of the call site (as if we did a handle_wrong_method
  // and not a plain ic_miss) and the site will be converted to an optimized virtual
  // call site, never to miss again. I don't believe C2 produces code like this, but
  // if it did this would still be the correct thing to do for it too, hence no ifdef.
1532   //
1533   if (call_info.resolved_method()->can_be_statically_bound()) {
1534     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
1535     if (TraceCallFixup) {
1536       RegisterMap reg_map(thread, false);
1537       frame caller_frame = thread->last_frame().sender(&reg_map);
1538       ResourceMark rm(thread);
1539       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1540       callee_method->print_short_name(tty);
1541       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1542       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1543     }
1544     return callee_method;
1545   }
1546 
1547   methodHandle callee_method = call_info.selected_method();
1548 
1551 #ifndef PRODUCT
1552   Atomic::inc(&_ic_miss_ctr);
1553 
1554   // Statistics & Tracing
1555   if (TraceCallFixup) {
1556     ResourceMark rm(thread);
1557     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1558     callee_method->print_short_name(tty);
1559     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1560   }
1561 
1562   if (ICMissHistogram) {
1563     MutexLocker m(VMStatistic_lock);
1564     RegisterMap reg_map(thread, false);
1565     frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
1566     // produce statistics under the lock
1567     trace_ic_miss(f.pc());
1568   }
1569 #endif
1570 
1571   // install an event collector so that when a vtable stub is created the
1572   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1573   // event can't be posted when the stub is created as locks are held
1574   // - instead the event will be deferred until the event collector goes
1575   // out of scope.
1576   JvmtiDynamicCodeEventCollector event_collector;
1577 
1578   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
  { MutexLocker ml_patch(CompiledIC_lock);
1580     RegisterMap reg_map(thread, false);
1581     frame caller_frame = thread->last_frame().sender(&reg_map);
1582     CodeBlob* cb = caller_frame.cb();
1583     CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
1584     if (cb->is_compiled()) {
      CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1586       bool should_be_mono = false;
1587       if (inline_cache->is_optimized()) {
1588         if (TraceCallFixup) {
1589           ResourceMark rm(thread);
1590           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1591           callee_method->print_short_name(tty);
1592           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1593         }
1594         should_be_mono = true;
1595       } else if (inline_cache->is_icholder_call()) {
1596         CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1597         if (ic_oop != NULL) {
1598 
1599           if (receiver()->klass() == ic_oop->holder_klass()) {
1600             // This isn't a real miss. We must have seen that compiled code
1601             // is now available and we want the call site converted to a
1602             // monomorphic compiled call site.
1603             // We can't assert for callee_method->code() != NULL because it
1604             // could have been deoptimized in the meantime
1605             if (TraceCallFixup) {
1606               ResourceMark rm(thread);
1607               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1608               callee_method->print_short_name(tty);
1609               tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1610             }
1611             should_be_mono = true;
1612           }
1613         }
1614       }
1615 
1616       if (should_be_mono) {
1617 
1618         // We have a path that was monomorphic but was going interpreted
1619         // and now we have (or had) a compiled entry. We correct the IC
1620         // by using a new icBuffer.
1621         CompiledICInfo info;
1622         KlassHandle receiver_klass(THREAD, receiver()->klass());
1623         inline_cache->compute_monomorphic_entry(callee_method,
1624                                                 receiver_klass,
1625                                                 inline_cache->is_optimized(),
1626                                                 false,
1627                                                 info, CHECK_(methodHandle()));
1628         inline_cache->set_to_monomorphic(info);
1629       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1630         // Potential change to megamorphic
1631         bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1632         if (!successful) {
1633           inline_cache->set_to_clean();
1634         }
1635       } else {
1636         // Either clean or megamorphic
1637       }
1638     } else {
1639       fatal("Unimplemented");
1640     }
1641   } // Release CompiledIC_lock
1642 
1643   return callee_method;
1644 }
1645 
1646 //
1647 // Resets a call-site in compiled code so it will get resolved again.
// This routine handles virtual call sites, optimized virtual call
// sites, and static call sites. Typically used to change a call site's
// destination from compiled to interpreted.
1651 //
1652 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1653   ResourceMark rm(thread);
1654   RegisterMap reg_map(thread, false);
1655   frame stub_frame = thread->last_frame();
1656   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1657   frame caller = stub_frame.sender(&reg_map);
1658 
1659   // Do nothing if the frame isn't a live compiled frame.
1660   // nmethod could be deoptimized by the time we get here
1661   // so no update to the caller is needed.
1662 
1663   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1664 
1665     address pc = caller.pc();
1666 
1667     // Check for static or virtual call
1668     bool is_static_call = false;
1669     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1670 
    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us through handle_wrong_method and we will
    //       eventually end up in the interpreter to throw the
    //       AbstractMethodError.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       jump to it the target gets deoptimized. Similar to 1,
    //       we will wind up in the interpreter (through a c2i with c2).
    //
1689     address call_addr = NULL;
1690     {
1691       // Get call instruction under lock because another thread may be
1692       // busy patching it.
1693       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1694       // Location of call instruction
1695       if (NativeCall::is_call_before(pc)) {
1696         NativeCall *ncall = nativeCall_before(pc);
1697         call_addr = ncall->instruction_address();
1698       }
1699     }
1700     // Make sure nmethod doesn't get deoptimized and removed until
1701     // this is done with it.
1702     // CLEANUP - with lazy deopt shouldn't need this lock
1703     nmethodLocker nmlock(caller_nm);
1704 
1705     if (call_addr != NULL) {
1706       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1707       int ret = iter.next(); // Get item
1708       if (ret) {
1709         assert(iter.addr() == call_addr, "must find call");
1710         if (iter.type() == relocInfo::static_call_type) {
1711           is_static_call = true;
1712         } else {
          assert(iter.type() == relocInfo::virtual_call_type ||
                 iter.type() == relocInfo::opt_virtual_call_type,
                 "unexpected relocInfo type");
1716         }
1717       } else {
        assert(!UseInlineCaches, "relocation info must exist for this address");
1719       }
1720 
      // Cleaning the inline cache will force a new resolve. This is more robust
      // than directly setting it to the new destination, since resolving of calls
      // is always done through the same code path. (Experience shows that it
      // leads to very hard-to-track-down bugs if an inline cache gets updated
      // to a wrong method.) It should not be performance critical, since the
      // resolve is only done once.
1727 
1728       MutexLocker ml(CompiledIC_lock);
1729       if (is_static_call) {
        CompiledStaticCall* ssc = compiledStaticCall_at(call_addr);
1731         ssc->set_to_clean();
1732       } else {
1733         // compiled, dispatched call (which used to call an interpreted method)
1734         CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1735         inline_cache->set_to_clean();
1736       }
1737     }
1738   }
1739 
1740   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1741 
1742 
1743 #ifndef PRODUCT
1744   Atomic::inc(&_wrong_method_ctr);
1745 
1746   if (TraceCallFixup) {
1747     ResourceMark rm(thread);
1748     tty->print("handle_wrong_method reresolving call to");
1749     callee_method->print_short_name(tty);
1750     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1751   }
1752 #endif
1753 
1754   return callee_method;
1755 }
1756 
1757 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1758   // The faulting unsafe accesses should be changed to throw the error
1759   // synchronously instead. Meanwhile the faulting instruction will be
1760   // skipped over (effectively turning it into a no-op) and an
1761   // asynchronous exception will be raised which the thread will
1762   // handle at a later point. If the instruction is a load it will
1763   // return garbage.
1764 
1765   // Request an async exception.
1766   thread->set_pending_unsafe_access_error();
1767 
1768   // Return address of next instruction to execute.
1769   return next_pc;
1770 }
1771 
1772 #ifdef ASSERT
1773 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1774                                                                 const BasicType* sig_bt,
1775                                                                 const VMRegPair* regs) {
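  // Recompute the calling convention without the trailing MemberName argument
  // and check that dropping it does not shift any of the preceding argument
  // registers.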
1776   ResourceMark rm;
1777   const int total_args_passed = method->size_of_parameters();
1778   const VMRegPair*    regs_with_member_name = regs;
1779         VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1780 
1781   const int member_arg_pos = total_args_passed - 1;
1782   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1783   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1784 
1785   const bool is_outgoing = method->is_method_handle_intrinsic();
1786   int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1, is_outgoing);
1787 
1788   for (int i = 0; i < member_arg_pos; i++) {
1789     VMReg a =    regs_with_member_name[i].first();
1790     VMReg b = regs_without_member_name[i].first();
1791     assert(a->value() == b->value(), "register allocation mismatch: a=" INTX_FORMAT ", b=" INTX_FORMAT, a->value(), b->value());
1792   }
1793   assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1794 }
1795 #endif
1796 
1797 // ---------------------------------------------------------------------------
1798 // We are calling the interpreter via a c2i. Normally this would mean that
1799 // we were called by a compiled method. However we could have lost a race
1800 // where we went int -> i2c -> c2i and so the caller could in fact be
1801 // interpreted. If the caller is compiled we attempt to patch the caller
// so it no longer calls into the interpreter.
1803 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1804   Method* moop(method);
1805 
1806   address entry_point = moop->from_compiled_entry_no_trampoline();
1807 
1808   // It's possible that deoptimization can occur at a call site which hasn't
1809   // been resolved yet, in which case this function will be called from
1810   // an nmethod that has been patched for deopt and we can ignore the
1811   // request for a fixup.
  // It is also possible that we lost a race in which from_compiled_entry
  // is now back to the i2c; in that case we don't need to patch, and if
  // we did we'd leap into space, because the call site needs to use the
  // "to interpreter" stub in order to load up the Method*. Don't
  // ask me how I know this...
1817 
1818   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1819   if (!cb->is_compiled() || entry_point == moop->get_c2i_entry()) {
1820     return;
1821   }
1822 
  // The check above makes sure this is an nmethod.
1824   CompiledMethod* nm = cb->as_compiled_method_or_null();
1825   assert(nm, "must be");
1826 
1827   // Get the return PC for the passed caller PC.
1828   address return_pc = caller_pc + frame::pc_return_offset;
1829 
1830   // There is a benign race here. We could be attempting to patch to a compiled
1831   // entry point at the same time the callee is being deoptimized. If that is
1832   // the case then entry_point may in fact point to a c2i and we'd patch the
1833   // call site with the same old data. clear_code will set code() to NULL
1834   // at the end of it. If we happen to see that NULL then we can skip trying
1835   // to patch. If we hit the window where the callee has a c2i in the
1836   // from_compiled_entry and the NULL isn't present yet then we lose the race
  // and patch the code with the same old data. Such is life.
1838 
1839   if (moop->code() == NULL) return;
1840 
1841   if (nm->is_in_use()) {
1842 
    // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
1844     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1845     if (NativeCall::is_call_before(return_pc)) {
1846       NativeCall *call = nativeCall_before(return_pc);
1847       //
1848       // bug 6281185. We might get here after resolving a call site to a vanilla
1849       // virtual call. Because the resolvee uses the verified entry it may then
1850       // see compiled code and attempt to patch the site by calling us. This would
      // then incorrectly convert the call site to optimized and it's downhill from
      // there. If you're lucky you'll get the assert in the bug id; if not, you've
      // just turned a call site that could become megamorphic into a monomorphic
      // site for the rest of its life! Just another racing bug in the life of
1855       // fixup_callers_callsite ...
1856       //
1857       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1858       iter.next();
1859       assert(iter.has_current(), "must have a reloc at java call site");
1860       relocInfo::relocType typ = iter.reloc()->type();
1861       if (typ != relocInfo::static_call_type &&
1862            typ != relocInfo::opt_virtual_call_type &&
1863            typ != relocInfo::static_stub_type) {
1864         return;
1865       }
1866       address destination = call->destination();
1867       if (destination != entry_point) {
1868         CodeBlob* callee = CodeCache::find_blob(destination);
        // callee == cb seems weird. It means calling the interpreter through a stub.
1870         if (callee == cb || callee->is_adapter_blob()) {
1871           // static call or optimized virtual
1872           if (TraceCallFixup) {
1873             tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1874             moop->print_short_name(tty);
1875             tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1876           }
1877           call->set_destination_mt_safe(entry_point);
1878         } else {
1879           if (TraceCallFixup) {
1880             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
1881             moop->print_short_name(tty);
1882             tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
1883           }
          // The assert below is too strong; the destination could also be a resolve stub.
1885           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1886         }
1887       } else {
        if (TraceCallFixup) {
          tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
          moop->print_short_name(tty);
          tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
        }
1893       }
1894     }
1895   }
1896 IRT_END
1897 
1898 
1899 // same as JVM_Arraycopy, but called directly from compiled code
1900 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1901                                                 oopDesc* dest, jint dest_pos,
1902                                                 jint length,
1903                                                 JavaThread* thread)) {
1904 #ifndef PRODUCT
1905   _slow_array_copy_ctr++;
1906 #endif
1907   // Check if we have null pointers
1908   if (src == NULL || dest == NULL) {
1909     THROW(vmSymbols::java_lang_NullPointerException());
1910   }
  // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
1912   // even though the copy_array API also performs dynamic checks to ensure
1913   // that src and dest are truly arrays (and are conformable).
1914   // The copy_array mechanism is awkward and could be removed, but
1915   // the compilers don't call this function except as a last resort,
1916   // so it probably doesn't matter.
  src->klass()->copy_array((arrayOopDesc*)src, src_pos,
                           (arrayOopDesc*)dest, dest_pos,
                           length, thread);
1920 }
1921 JRT_END
1922 
1923 // The caller of generate_class_cast_message() (or one of its callers)
1924 // must use a ResourceMark in order to correctly free the result.
1925 char* SharedRuntime::generate_class_cast_message(
1926     JavaThread* thread, Klass* caster_klass) {
1927 
1928   // Get target class name from the checkcast instruction
1929   vframeStream vfst(thread, true);
1930   assert(!vfst.at_end(), "Java frame must exist");
1931   Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
1932   Klass* target_klass = vfst.method()->constants()->klass_at(
1933     cc.index(), thread);
1934   return generate_class_cast_message(caster_klass, target_klass);
1935 }
1936 
1937 // The caller of class_loader_and_module_name() (or one of its callers)
1938 // must use a ResourceMark in order to correctly free the result.
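//
// The result has the form
//   <loader-name>/<module-name>[@<version>]/<fully-qualified-class-name>
// where the loader and module components (and their '/' delimiters) are
// omitted when absent. Hypothetical examples: "myloader/mymod@1.0/com.example.Foo"
// for a class in a named, versioned non-JDK module loaded by a named loader,
// or "java.base/java.lang.String" for a boot-loaded class (JDK modules never
// print a version).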
1939 const char* class_loader_and_module_name(Klass* klass) {
1940   const char* delim = "/";
1941   size_t delim_len = strlen(delim);
1942 
1943   const char* fqn = klass->external_name();
1944   // Length of message to return; always include FQN
1945   size_t msglen = strlen(fqn) + 1;
1946 
1947   bool has_cl_name = false;
1948   bool has_mod_name = false;
1949   bool has_version = false;
1950 
  // Use the class loader name, if it exists and is not builtin
1952   const char* class_loader_name = "";
1953   ClassLoaderData* cld = klass->class_loader_data();
1954   assert(cld != NULL, "class_loader_data should not be NULL");
1955   if (!cld->is_builtin_class_loader_data()) {
1956     // If not builtin, look for name
1957     oop loader = klass->class_loader();
1958     if (loader != NULL) {
1959       oop class_loader_name_oop = java_lang_ClassLoader::name(loader);
1960       if (class_loader_name_oop != NULL) {
1961         class_loader_name = java_lang_String::as_utf8_string(class_loader_name_oop);
1962         if (class_loader_name != NULL && class_loader_name[0] != '\0') {
1963           has_cl_name = true;
1964           msglen += strlen(class_loader_name) + delim_len;
1965         }
1966       }
1967     }
1968   }
1969 
1970   const char* module_name = "";
1971   const char* version = "";
1972   Klass* bottom_klass = klass->is_objArray_klass() ?
1973     ObjArrayKlass::cast(klass)->bottom_klass() : klass;
1974   if (bottom_klass->is_instance_klass()) {
1975     ModuleEntry* module = InstanceKlass::cast(bottom_klass)->module();
    // Use the module name, if it exists
1977     if (module->is_named()) {
1978       has_mod_name = true;
1979       module_name = module->name()->as_C_string();
1980       msglen += strlen(module_name);
      // Use the version, if it exists and this is not a JDK module
1982       if (module->is_non_jdk_module() && module->version() != NULL) {
1983         has_version = true;
1984         version = module->version()->as_C_string();
1985         msglen += strlen("@") + strlen(version);
1986       }
1987     }
1988   } else {
1989     // klass is an array of primitives, so its module is java.base
1990     module_name = JAVA_BASE_NAME;
1991   }
1992 
1993   if (has_cl_name || has_mod_name) {
1994     msglen += delim_len;
1995   }
1996 
1997   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1998 
  // Just return the FQN if the string allocation fails
2000   if (message == NULL) {
2001     return fqn;
2002   }
2003 
2004   jio_snprintf(message, msglen, "%s%s%s%s%s%s%s",
2005                class_loader_name,
2006                (has_cl_name) ? delim : "",
2007                (has_mod_name) ? module_name : "",
2008                (has_version) ? "@" : "",
2009                (has_version) ? version : "",
2010                (has_cl_name || has_mod_name) ? delim : "",
2011                fqn);
2012   return message;
2013 }
2014 
2015 char* SharedRuntime::generate_class_cast_message(
2016     Klass* caster_klass, Klass* target_klass) {
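  // The resulting message has the form (hypothetical example):
  //   "myloader/mymod/com.example.Foo cannot be cast to java.base/java.lang.String"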
2017 
2018   const char* caster_name = class_loader_and_module_name(caster_klass);
2019 
2020   const char* target_name = class_loader_and_module_name(target_klass);
2021 
2022   size_t msglen = strlen(caster_name) + strlen(" cannot be cast to ") + strlen(target_name) + 1;
2023 
2024   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2025   if (message == NULL) {
2026     // Shouldn't happen, but don't cause even more problems if it does
2027     message = const_cast<char*>(caster_klass->external_name());
2028   } else {
2029     jio_snprintf(message,
2030                  msglen,
2031                  "%s cannot be cast to %s",
2032                  caster_name,
2033                  target_name);
2034   }
2035   return message;
2036 }
2037 
2038 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2039   (void) JavaThread::current()->reguard_stack();
2040 JRT_END
2041 
2042 
2043 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2044 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
2045   // Disable ObjectSynchronizer::quick_enter() in default config
2046   // on AARCH64 and ARM until JDK-8153107 is resolved.
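  // (ARM_ONLY/AARCH64_ONLY expand to their argument only on those platforms,
  // so elsewhere the guard below reduces to !SafepointSynchronize::is_synchronizing().)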
2047   if (ARM_ONLY((SyncFlags & 256) != 0 &&)
2048       AARCH64_ONLY((SyncFlags & 256) != 0 &&)
2049       !SafepointSynchronize::is_synchronizing()) {
2050     // Only try quick_enter() if we're not trying to reach a safepoint
2051     // so that the calling thread reaches the safepoint more quickly.
2052     if (ObjectSynchronizer::quick_enter(_obj, thread, lock)) return;
2053   }
2054   // NO_ASYNC required because an async exception on the state transition destructor
2055   // would leave you with the lock held and it would never be released.
2056   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2057   // and the model is that an exception implies the method failed.
2058   JRT_BLOCK_NO_ASYNC
2059   oop obj(_obj);
2060   if (PrintBiasedLockingStatistics) {
2061     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
2062   }
2063   Handle h_obj(THREAD, obj);
2064   if (UseBiasedLocking) {
2065     // Retry fast entry if bias is revoked to avoid unnecessary inflation
2066     ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
2067   } else {
2068     ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
2069   }
2070   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2071   JRT_BLOCK_END
2072 JRT_END
2073 
2074 // Handles the uncommon cases of monitor unlocking in compiled code
2075 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock, JavaThread * THREAD))
  oop obj(_obj);
2077   assert(JavaThread::current() == THREAD, "invariant");
  // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
  // testing was never able to fire the assert that guarded it, so it has been
  // compiled out (see the #undef below).
2080   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
2081 #undef MIGHT_HAVE_PENDING
2082 #ifdef MIGHT_HAVE_PENDING
2083   // Save and restore any pending_exception around the exception mark.
2084   // While the slow_exit must not throw an exception, we could come into
2085   // this routine with one set.
2086   oop pending_excep = NULL;
2087   const char* pending_file;
2088   int pending_line;
2089   if (HAS_PENDING_EXCEPTION) {
2090     pending_excep = PENDING_EXCEPTION;
2091     pending_file  = THREAD->exception_file();
2092     pending_line  = THREAD->exception_line();
2093     CLEAR_PENDING_EXCEPTION;
2094   }
2095 #endif /* MIGHT_HAVE_PENDING */
2096 
2097   {
2098     // Exit must be non-blocking, and therefore no exceptions can be thrown.
2099     EXCEPTION_MARK;
2100     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
2101   }
2102 
2103 #ifdef MIGHT_HAVE_PENDING
2104   if (pending_excep != NULL) {
2105     THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
2106   }
2107 #endif /* MIGHT_HAVE_PENDING */
2108 JRT_END
2109 
2110 #ifndef PRODUCT
2111 
2112 void SharedRuntime::print_statistics() {
2113   ttyLocker ttyl;
2114   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
2115 
2116   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
2117 
2118   SharedRuntime::print_ic_miss_histogram();
2119 
2120   if (CountRemovableExceptions) {
2121     if (_nof_removable_exceptions > 0) {
2122       Unimplemented(); // this counter is not yet incremented
2123       tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
2124     }
2125   }
2126 
2127   // Dump the JRT_ENTRY counters
2128   if (_new_instance_ctr) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
2129   if (_new_array_ctr) tty->print_cr("%5d new array requires GC", _new_array_ctr);
2130   if (_multi1_ctr) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
2131   if (_multi2_ctr) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
2132   if (_multi3_ctr) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
2133   if (_multi4_ctr) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
2134   if (_multi5_ctr) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
2135 
2136   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr);
2137   tty->print_cr("%5d wrong method", _wrong_method_ctr);
2138   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr);
2139   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr);
2140   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2141 
2142   if (_mon_enter_stub_ctr) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr);
2143   if (_mon_exit_stub_ctr) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr);
2144   if (_mon_enter_ctr) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr);
2145   if (_mon_exit_ctr) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr);
2146   if (_partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr);
2147   if (_jbyte_array_copy_ctr) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr);
2148   if (_jshort_array_copy_ctr) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr);
2149   if (_jint_array_copy_ctr) tty->print_cr("%5d int array copies", _jint_array_copy_ctr);
2150   if (_jlong_array_copy_ctr) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr);
2151   if (_oop_array_copy_ctr) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr);
2152   if (_checkcast_array_copy_ctr) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr);
2153   if (_unsafe_array_copy_ctr) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr);
2154   if (_generic_array_copy_ctr) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr);
2155   if (_slow_array_copy_ctr) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr);
2156   if (_find_handler_ctr) tty->print_cr("%5d find exception handler", _find_handler_ctr);
2157   if (_rethrow_ctr) tty->print_cr("%5d rethrow handler", _rethrow_ctr);
2158 
2159   AdapterHandlerLibrary::print_statistics();
2160 
2161   if (xtty != NULL)  xtty->tail("statistics");
2162 }
2163 
2164 inline double percent(int x, int y) {
2165   return 100.0 * x / MAX2(y, 1);
2166 }
2167 
2168 class MethodArityHistogram {
2169  public:
2170   enum { MAX_ARITY = 256 };
2171  private:
2172   static int _arity_histogram[MAX_ARITY];     // histogram of #args
2173   static int _size_histogram[MAX_ARITY];      // histogram of arg size in words
2174   static int _max_arity;                      // max. arity seen
2175   static int _max_size;                       // max. arg size seen
2176 
2177   static void add_method_to_histogram(nmethod* nm) {
2178     Method* m = nm->method();
2179     ArgumentCount args(m->signature());
2180     int arity   = args.size() + (m->is_static() ? 0 : 1);
2181     int argsize = m->size_of_parameters();
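    // For example, a non-static m(int, long) has arity 3 (receiver plus two
    // declared args) and argsize 4 words (receiver 1, int 1, long 2); both
    // histograms are weighted by the compiled invocation count below.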
2182     arity   = MIN2(arity, MAX_ARITY-1);
2183     argsize = MIN2(argsize, MAX_ARITY-1);
2184     int count = nm->method()->compiled_invocation_count();
2185     _arity_histogram[arity]  += count;
2186     _size_histogram[argsize] += count;
2187     _max_arity = MAX2(_max_arity, arity);
2188     _max_size  = MAX2(_max_size, argsize);
2189   }
2190 
2191   void print_histogram_helper(int n, int* histo, const char* name) {
2192     const int N = MIN2(5, n);
2193     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2194     double sum = 0;
2195     double weighted_sum = 0;
2196     int i;
2197     for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
2198     double rest = sum;
2199     double percent = sum / 100;
2200     for (i = 0; i <= N; i++) {
2201       rest -= histo[i];
2202       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
2203     }
2204     tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
2205     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2206   }
2207 
2208   void print_histogram() {
2209     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2210     print_histogram_helper(_max_arity, _arity_histogram, "arity");
2211     tty->print_cr("\nSame for parameter size (in words):");
2212     print_histogram_helper(_max_size, _size_histogram, "size");
2213     tty->cr();
2214   }
2215 
2216  public:
2217   MethodArityHistogram() {
2218     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2219     _max_arity = _max_size = 0;
2220     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2221     CodeCache::nmethods_do(add_method_to_histogram);
2222     print_histogram();
2223   }
2224 };
2225 
2226 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2227 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2228 int MethodArityHistogram::_max_arity;
2229 int MethodArityHistogram::_max_size;
2230 
2231 void SharedRuntime::print_call_statistics(int comp_total) {
2232   tty->print_cr("Calls from compiled code:");
2233   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2234   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
2235   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
2236   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
2237   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2238   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2239   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
2240   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
2241   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2242   tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
2243   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2244   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
2245   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
2246   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
2247   tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2248   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2249   tty->cr();
2250   tty->print_cr("Note 1: counter updates are not MT-safe.");
2251   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2252   tty->print_cr("        %% in nested categories are relative to their category");
2253   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2254   tty->cr();
2255 
2256   MethodArityHistogram h;
2257 }
2258 #endif
2259 
2260 
2261 // A simple wrapper class around the calling convention information
2262 // that allows sharing of adapters for the same calling convention.
2263 class AdapterFingerPrint : public CHeapObj<mtCode> {
2264  private:
2265   enum {
2266     _basic_type_bits = 4,
2267     _basic_type_mask = right_n_bits(_basic_type_bits),
2268     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2269     _compact_int_count = 3
2270   };
2271   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
  // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2273 
2274   union {
2275     int  _compact[_compact_int_count];
2276     int* _fingerprint;
2277   } _value;
  int _length; // A negative length indicates the fingerprint is in the compact form;
               // otherwise _value._fingerprint points to the array.
2280 
2281   // Remap BasicTypes that are handled equivalently by the adapters.
2282   // These are correct for the current system but someday it might be
2283   // necessary to make this mapping platform dependent.
2284   static int adapter_encoding(BasicType in) {
2285     switch (in) {
2286       case T_BOOLEAN:
2287       case T_BYTE:
2288       case T_SHORT:
2289       case T_CHAR:
        // These are all promoted to T_INT in the calling convention
2291         return T_INT;
2292 
2293       case T_OBJECT:
2294       case T_ARRAY:
2295         // In other words, we assume that any register good enough for
2296         // an int or long is good enough for a managed pointer.
2297 #ifdef _LP64
2298         return T_LONG;
2299 #else
2300         return T_INT;
2301 #endif
2302 
2303       case T_INT:
2304       case T_LONG:
2305       case T_FLOAT:
2306       case T_DOUBLE:
2307       case T_VOID:
2308         return in;
2309 
2310       default:
2311         ShouldNotReachHere();
2312         return T_CONFLICT;
2313     }
2314   }
2315 
2316  public:
2317   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2318     // The fingerprint is based on the BasicType signature encoded
2319     // into an array of ints with eight entries per int.
2320     int* ptr;
2321     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2322     if (len <= _compact_int_count) {
2323       assert(_compact_int_count == 3, "else change next line");
2324       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
      // Storing the signature in the compact encoded form succeeds about
      // 98% of the time.
2327       _length = -len;
2328       ptr = _value._compact;
2329     } else {
2330       _length = len;
2331       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2332       ptr = _value._fingerprint;
2333     }
2334 
2335     // Now pack the BasicTypes with 8 per int
2336     int sig_index = 0;
2337     for (int index = 0; index < len; index++) {
2338       int value = 0;
2339       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2340         int bt = ((sig_index < total_args_passed)
2341                   ? adapter_encoding(sig_bt[sig_index++])
2342                   : 0);
2343         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2344         value = (value << _basic_type_bits) | bt;
2345       }
2346       ptr[index] = value;
2347     }
2348   }
2349 
2350   ~AdapterFingerPrint() {
2351     if (_length > 0) {
2352       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2353     }
2354   }
2355 
2356   int value(int index) {
2357     if (_length < 0) {
2358       return _value._compact[index];
2359     }
2360     return _value._fingerprint[index];
2361   }
2362   int length() {
2363     if (_length < 0) return -_length;
2364     return _length;
2365   }
2366 
2367   bool is_compact() {
2368     return _length <= 0;
2369   }
2370 
2371   unsigned int compute_hash() {
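    // Shift-xor mix of the packed fingerprint words. The hash only selects a
    // bucket in the AdapterHandlerTable; collisions are resolved via equals().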
2372     int hash = 0;
2373     for (int i = 0; i < length(); i++) {
2374       int v = value(i);
2375       hash = (hash << 8) ^ v ^ (hash >> 5);
2376     }
2377     return (unsigned int)hash;
2378   }
2379 
2380   const char* as_string() {
2381     stringStream st;
2382     st.print("0x");
2383     for (int i = 0; i < length(); i++) {
2384       st.print("%08x", value(i));
2385     }
2386     return st.as_string();
2387   }
2388 
2389   bool equals(AdapterFingerPrint* other) {
2390     if (other->_length != _length) {
2391       return false;
2392     }
2393     if (_length < 0) {
2394       assert(_compact_int_count == 3, "else change next line");
2395       return _value._compact[0] == other->_value._compact[0] &&
2396              _value._compact[1] == other->_value._compact[1] &&
2397              _value._compact[2] == other->_value._compact[2];
2398     } else {
2399       for (int i = 0; i < _length; i++) {
2400         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2401           return false;
2402         }
2403       }
2404     }
2405     return true;
2406   }
2407 };
2408 
2409 
2410 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2411 class AdapterHandlerTable : public BasicHashtable<mtCode> {
2412   friend class AdapterHandlerTableIterator;
2413 
2414  private:
2415 
2416 #ifndef PRODUCT
2417   static int _lookups; // number of calls to lookup
2418   static int _buckets; // number of buckets checked
2419   static int _equals;  // number of buckets checked with matching hash
2420   static int _hits;    // number of successful lookups
2421   static int _compact; // number of equals calls with compact signature
2422 #endif
2423 
2424   AdapterHandlerEntry* bucket(int i) {
2425     return (AdapterHandlerEntry*)BasicHashtable<mtCode>::bucket(i);
2426   }
2427 
2428  public:
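  // 293 buckets (a prime); entries are allocated large enough that a
  // CDSAdapterHandlerEntry fits when dumping a shared archive.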
2429   AdapterHandlerTable()
2430     : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { }
2431 
2432   // Create a new entry suitable for insertion in the table
2433   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
2434     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
2435     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2436     if (DumpSharedSpaces) {
2437       ((CDSAdapterHandlerEntry*)entry)->init();
2438     }
2439     return entry;
2440   }
2441 
2442   // Insert an entry into the table
2443   void add(AdapterHandlerEntry* entry) {
2444     int index = hash_to_index(entry->hash());
2445     add_entry(index, entry);
2446   }
2447 
2448   void free_entry(AdapterHandlerEntry* entry) {
2449     entry->deallocate();
2450     BasicHashtable<mtCode>::free_entry(entry);
2451   }
2452 
  // Find an entry with the same fingerprint, if it exists
2454   AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2455     NOT_PRODUCT(_lookups++);
2456     AdapterFingerPrint fp(total_args_passed, sig_bt);
2457     unsigned int hash = fp.compute_hash();
2458     int index = hash_to_index(hash);
2459     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2460       NOT_PRODUCT(_buckets++);
2461       if (e->hash() == hash) {
2462         NOT_PRODUCT(_equals++);
2463         if (fp.equals(e->fingerprint())) {
2464 #ifndef PRODUCT
2465           if (fp.is_compact()) _compact++;
2466           _hits++;
2467 #endif
2468           return e;
2469         }
2470       }
2471     }
2472     return NULL;
2473   }
2474 
2475 #ifndef PRODUCT
2476   void print_statistics() {
2477     ResourceMark rm;
2478     int longest = 0;
2479     int empty = 0;
2480     int total = 0;
2481     int nonempty = 0;
2482     for (int index = 0; index < table_size(); index++) {
2483       int count = 0;
2484       for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2485         count++;
2486       }
2487       if (count != 0) nonempty++;
2488       if (count == 0) empty++;
2489       if (count > longest) longest = count;
2490       total += count;
2491     }
2492     tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
2493                   empty, longest, total, total / (double)nonempty);
2494     tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
2495                   _lookups, _buckets, _equals, _hits, _compact);
2496   }
2497 #endif
2498 };
2499 
2500 
2501 #ifndef PRODUCT
2502 
2503 int AdapterHandlerTable::_lookups;
2504 int AdapterHandlerTable::_buckets;
2505 int AdapterHandlerTable::_equals;
2506 int AdapterHandlerTable::_hits;
2507 int AdapterHandlerTable::_compact;
2508 
2509 #endif
2510 
2511 class AdapterHandlerTableIterator : public StackObj {
2512  private:
2513   AdapterHandlerTable* _table;
2514   int _index;
2515   AdapterHandlerEntry* _current;
2516 
2517   void scan() {
2518     while (_index < _table->table_size()) {
2519       AdapterHandlerEntry* a = _table->bucket(_index);
2520       _index++;
2521       if (a != NULL) {
2522         _current = a;
2523         return;
2524       }
2525     }
2526   }
2527 
2528  public:
2529   AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
2530     scan();
2531   }
2532   bool has_next() {
2533     return _current != NULL;
2534   }
2535   AdapterHandlerEntry* next() {
2536     if (_current != NULL) {
2537       AdapterHandlerEntry* result = _current;
2538       _current = _current->next();
2539       if (_current == NULL) scan();
2540       return result;
2541     } else {
2542       return NULL;
2543     }
2544   }
2545 };
2546 
2547 
2548 // ---------------------------------------------------------------------------
2549 // Implementation of AdapterHandlerLibrary
2550 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
2551 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2552 const int AdapterHandlerLibrary_size = 16*K;
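// Temporary scratch blob: adapter code is generated here first and then
// copied into its own AdapterBlob in the code cache.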
2553 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2554 
2555 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2556   // Should be called only when AdapterHandlerLibrary_lock is active.
2557   if (_buffer == NULL) // Initialize lazily
    _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2559   return _buffer;
2560 }
2561 
2562 extern "C" void unexpected_adapter_call() {
2563   ShouldNotCallThis();
2564 }
2565 
2566 void AdapterHandlerLibrary::initialize() {
2567   if (_adapters != NULL) return;
2568   _adapters = new AdapterHandlerTable();
2569 
2570   if (!CodeCacheExtensions::skip_compiler_support()) {
    // Create a special handler for abstract methods.  Abstract methods
    // are never compiled, so an i2c entry is somewhat meaningless, but
    // it throws AbstractMethodError just in case.
    // Pass wrong_method_abstract for the c2i transitions so that invalid
    // invocations raise AbstractMethodError.
2576     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2577     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2578                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2579                                                                 wrong_method_abstract, wrong_method_abstract);
2580   } else {
    // Adapters are not supposed to be used.
    // Generate a special one that causes an error if it is ever called
    // (and store this singleton in _abstract_method_handler in place of
    // the usual abstract-method adapter).
2584     address entry = (address) &unexpected_adapter_call;
2585     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2586                                                                 entry,
2587                                                                 entry,
2588                                                                 entry);
2590   }
2591 }
2592 
2593 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2594                                                       address i2c_entry,
2595                                                       address c2i_entry,
2596                                                       address c2i_unverified_entry) {
2597   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2598 }
2599 
2600 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2601   AdapterHandlerEntry* entry = get_adapter0(method);
2602   if (method->is_shared()) {
2603     // See comments around Method::link_method()
2604     MutexLocker mu(AdapterHandlerLibrary_lock);
2605     if (method->adapter() == NULL) {
2606       method->update_adapter_trampoline(entry);
2607     }
2608     address trampoline = method->from_compiled_entry();
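    // A first word of zero means the trampoline has not been generated yet;
    // generated trampoline instructions are never encoded as all zeros (see
    // the assert below).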
2609     if (*(int*)trampoline == 0) {
2610       CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size());
2611       MacroAssembler _masm(&buffer);
2612       SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry());
2613       assert(*(int*)trampoline != 0, "Instruction(s) for trampoline must not be encoded as zeros.");
2614 
2615       if (PrintInterpreter) {
2616         Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
2617       }
2618     }
2619   }
2620 
2621   return entry;
2622 }
2623 
2624 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) {
  // Use a customized signature handler.  Lock around updates to the
  // AdapterHandlerTable: it is not safe for concurrent readers with a
  // single writer (this could be fixed if it becomes a problem).
2629 
2630   ResourceMark rm;
2631 
2632   NOT_PRODUCT(int insts_size);
2633   AdapterBlob* new_adapter = NULL;
2634   AdapterHandlerEntry* entry = NULL;
2635   AdapterFingerPrint* fingerprint = NULL;
2636   {
2637     MutexLocker mu(AdapterHandlerLibrary_lock);
2638     // make sure data structure is initialized
2639     initialize();
2640 
    // During dump time, always generate adapters, even if the
    // compiler has been turned off.
    if (!DumpSharedSpaces && CodeCacheExtensions::skip_compiler_support()) {
      // Adapters are useless and should not be used, including the
      // abstract_method_handler.  However, some callers check that
      // an adapter was installed, so return the singleton adapter stored
      // in _abstract_method_handler, which has been modified to cause an
      // error if it is ever called.
2649       return _abstract_method_handler;
2650     }
2651 
2652     if (method->is_abstract()) {
2653       return _abstract_method_handler;
2654     }
2655 
2656     // Fill in the signature array, for the calling-convention call.
2657     int total_args_passed = method->size_of_parameters(); // All args on stack
2658 
2659     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2660     VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2661     int i = 0;
2662     if (!method->is_static())  // Pass in receiver first
2663       sig_bt[i++] = T_OBJECT;
2664     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2665       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
2666       if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2667         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2668     }
2669     assert(i == total_args_passed, "");
2670 
2671     // Lookup method signature's fingerprint
2672     entry = _adapters->lookup(total_args_passed, sig_bt);
2673 
2674 #ifdef ASSERT
2675     AdapterHandlerEntry* shared_entry = NULL;
2676     // Start adapter sharing verification only after the VM is booted.
2677     if (VerifyAdapterSharing && (entry != NULL)) {
2678       shared_entry = entry;
2679       entry = NULL;
2680     }
2681 #endif
2682 
2683     if (entry != NULL) {
2684       return entry;
2685     }
2686 
2687     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2688     int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
2689 
2690     // Make a C heap allocated version of the fingerprint to store in the adapter
2691     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2692 
    // StubRoutines::code2() is initialized after this function can be called. As a result,
    // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was
    // generated prior to StubRoutines::code2() being set. "Checks" here refers to the
    // checks generated in an I2C stub that ensure the I2C stub is called from an
    // interpreter frame.
2697     bool contains_all_checks = StubRoutines::code2() != NULL;
2698 
2699     // Create I2C & C2I handlers
2700     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2701     if (buf != NULL) {
2702       CodeBuffer buffer(buf);
2703       short buffer_locs[20];
2704       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2705                                              sizeof(buffer_locs)/sizeof(relocInfo));
2706 
2707       MacroAssembler _masm(&buffer);
2708       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2709                                                      total_args_passed,
2710                                                      comp_args_on_stack,
2711                                                      sig_bt,
2712                                                      regs,
2713                                                      fingerprint);
2714 #ifdef ASSERT
2715       if (VerifyAdapterSharing) {
2716         if (shared_entry != NULL) {
2717           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match");
2718           // Release the one just created and return the original
2719           _adapters->free_entry(entry);
2720           return shared_entry;
2721         } else  {
2722           entry->save_code(buf->code_begin(), buffer.insts_size());
2723         }
2724       }
2725 #endif
2726 
2727       new_adapter = AdapterBlob::create(&buffer);
2728       NOT_PRODUCT(insts_size = buffer.insts_size());
2729     }
2730     if (new_adapter == NULL) {
      // CodeCache is full; disable compilation.
      // We ought to log this, but the compile log is only per compile
      // thread and we're some nondescript Java thread.
2734       return NULL; // Out of CodeCache space
2735     }
2736     entry->relocate(new_adapter->content_begin());
2737 #ifndef PRODUCT
    // Debugging support.
2739     if (PrintAdapterHandlers || PrintStubCode) {
2740       ttyLocker ttyl;
2741       entry->print_adapter_on(tty);
2742       tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
2743                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2744                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
2745       tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
2746       if (Verbose || PrintStubCode) {
2747         address first_pc = entry->base_address();
2748         if (first_pc != NULL) {
2749           Disassembler::decode(first_pc, first_pc + insts_size);
2750           tty->cr();
2751         }
2752       }
2753     }
2754 #endif
2755     // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2756     // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2757     if (contains_all_checks || !VerifyAdapterCalls) {
2758       _adapters->add(entry);
2759     }
2760   }
2761   // Outside of the lock
2762   if (new_adapter != NULL) {
2763     char blob_id[256];
2764     jio_snprintf(blob_id,
2765                  sizeof(blob_id),
2766                  "%s(%s)@" PTR_FORMAT,
2767                  new_adapter->name(),
2768                  fingerprint->as_string(),
2769                  new_adapter->content_begin());
2770     Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2771 
2772     if (JvmtiExport::should_post_dynamic_code_generated()) {
2773       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2774     }
2775   }
2776   return entry;
2777 }
2778 
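// Return the lowest of the entry points; the asserts below document that
// _i2c_entry, when present, precedes the c2i entries in the generated code.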
2779 address AdapterHandlerEntry::base_address() {
2780   address base = _i2c_entry;
2781   if (base == NULL)  base = _c2i_entry;
2782   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
2783   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
2784   return base;
2785 }
2786 
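// Relocation sketch: all entry points were laid out contiguously in the
// temporary buffer, so moving the code into its final AdapterBlob just
// shifts each non-NULL entry by the same delta from the old base address.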
2787 void AdapterHandlerEntry::relocate(address new_base) {
2788   address old_base = base_address();
2789   assert(old_base != NULL, "");
2790   ptrdiff_t delta = new_base - old_base;
2791   if (_i2c_entry != NULL)
2792     _i2c_entry += delta;
2793   if (_c2i_entry != NULL)
2794     _c2i_entry += delta;
2795   if (_c2i_unverified_entry != NULL)
2796     _c2i_unverified_entry += delta;
2797   assert(base_address() == new_base, "");
2798 }
2799 
2800 
2801 void AdapterHandlerEntry::deallocate() {
2802   delete _fingerprint;
2803 #ifdef ASSERT
2804   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2805 #endif
2806 }
2807 
2808 
2809 #ifdef ASSERT
2810 // Capture the code before relocation so that it can be compared
2811 // against other versions.  If the code is captured after relocation
2812 // then relative instructions won't be equivalent.
2813 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
2814   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2815   _saved_code_length = length;
2816   memcpy(_saved_code, buffer, length);
2817 }
2818 
2819 
2820 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length) {
2821   if (length != _saved_code_length) {
2822     return false;
2823   }
2824 
  return memcmp(buffer, _saved_code, length) == 0;
2826 }
2827 #endif
2828 
2829 
2830 /**
2831  * Create a native wrapper for this native method.  The wrapper converts the
2832  * Java-compiled calling convention to the native convention, handles
 * arguments, and transitions to native.  On return from native code we
 * transition back to Java, blocking if a safepoint is in progress.
2835  */
2836 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
2837   ResourceMark rm;
2838   nmethod* nm = NULL;
2839 
2840   assert(method->is_native(), "must be native");
2841   assert(method->is_method_handle_intrinsic() ||
2842          method->has_native_function(), "must have something valid to call!");
2843 
2844   {
2845     // Perform the work while holding the lock, but perform any printing outside the lock
2846     MutexLocker mu(AdapterHandlerLibrary_lock);
2847     // See if somebody beat us to it
2848     if (method->code() != NULL) {
2849       return;
2850     }
2851 
2852     const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
2853     assert(compile_id > 0, "Must generate native wrapper");
2854 
2855 
2856     ResourceMark rm;
2857     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
2858     if (buf != NULL) {
2859       CodeBuffer buffer(buf);
2860       double locs_buf[20];
2861       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2862       MacroAssembler _masm(&buffer);
2863 
2864       // Fill in the signature array, for the calling-convention call.
2865       const int total_args_passed = method->size_of_parameters();
2866 
2867       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2868       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
      int i = 0;
2870       if (!method->is_static())  // Pass in receiver first
2871         sig_bt[i++] = T_OBJECT;
2872       SignatureStream ss(method->signature());
2873       for (; !ss.at_return_type(); ss.next()) {
2874         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
2875         if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2876           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2877       }
2878       assert(i == total_args_passed, "");
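      // The loop above stopped at the return type, so ss now yields it.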
2879       BasicType ret_type = ss.type();
2880 
2881       // Now get the compiled-Java layout as input (or output) arguments.
2882       // NOTE: Stubs for compiled entry points of method handle intrinsics
2883       // are just trampolines so the argument registers must be outgoing ones.
2884       const bool is_outgoing = method->is_method_handle_intrinsic();
2885       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing);
2886 
2887       // Generate the compiled-to-native wrapper code
2888       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
2889 
2890       if (nm != NULL) {
2891         method->set_code(method, nm);
2892 
2893         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
2894         if (directive->PrintAssemblyOption) {
2895           nm->print_code();
2896         }
2897         DirectivesStack::release(directive);
2898       }
2899     }
2900   } // Unlock AdapterHandlerLibrary_lock
2901 
2902 
2903   // Install the generated code.
2904   if (nm != NULL) {
2905     if (PrintCompilation) {
2906       ttyLocker ttyl;
2907       CompileTask::print(tty, nm, method->is_static() ? "(static)" : "");
2908     }
2909     nm->post_compiled_method_load_event();
2910   }
2911 }
2912 
2913 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
2914   assert(thread == JavaThread::current(), "must be");
2915   // The code is about to enter a JNI lazy critical native method and
2916   // _needs_gc is true, so if this thread is already in a critical
2917   // section then just return, otherwise this thread should block
2918   // until needs_gc has been cleared.
2919   if (thread->in_critical()) {
2920     return;
2921   }
2922   // Lock and unlock a critical section to give the system a chance to block
2923   GCLocker::lock_critical(thread);
2924   GCLocker::unlock_critical(thread);
2925 JRT_END
2926 
2927 // -------------------------------------------------------------------------
2928 // Java-Java calling convention
2929 // (what you use when Java calls Java)
2930 
2931 //------------------------------name_for_receiver----------------------------------
2932 // For a given signature, return the VMReg for parameter 0.
2933 VMReg SharedRuntime::name_for_receiver() {
2934   VMRegPair regs;
2935   BasicType sig_bt = T_OBJECT;
2936   (void) java_calling_convention(&sig_bt, &regs, 1, true);
2937   // Return argument 0 register.  In the LP64 build pointers
2938   // take 2 registers, but the VM wants only the 'main' name.
2939   return regs.first();
2940 }
2941 
2942 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
  // This method returns a data structure allocated as a
  // ResourceObject, so do not put any ResourceMarks in here.
2945   char *s = sig->as_C_string();
2946   int len = (int)strlen(s);
2947   s++; len--;                   // Skip opening paren
2948 
2949   BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
2950   VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
2951   int cnt = 0;
2952   if (has_receiver) {
2953     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2954   }
2955 
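  // Illustrative example: the signature "(I[JLjava/lang/String;D)V" yields
  // T_INT, T_ARRAY, T_OBJECT, T_DOUBLE, T_VOID -- the trailing T_VOID is the
  // second slot of the double; arrays occupy a single slot.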
2956   while (*s != ')') {          // Find closing right paren
2957     switch (*s++) {            // Switch on signature character
2958     case 'B': sig_bt[cnt++] = T_BYTE;    break;
2959     case 'C': sig_bt[cnt++] = T_CHAR;    break;
2960     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
2961     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
2962     case 'I': sig_bt[cnt++] = T_INT;     break;
2963     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
2964     case 'S': sig_bt[cnt++] = T_SHORT;   break;
2965     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2966     case 'V': sig_bt[cnt++] = T_VOID;    break;
2967     case 'L':                   // Oop
2968       while (*s++ != ';');   // Skip signature
2969       sig_bt[cnt++] = T_OBJECT;
2970       break;
2971     case '[': {                 // Array
2972       do {                      // Skip optional size
2973         while (*s >= '0' && *s <= '9') s++;
2974       } while (*s++ == '[');   // Nested arrays?
2975       // Skip element type
2976       if (s[-1] == 'L')
2977         while (*s++ != ';'); // Skip signature
2978       sig_bt[cnt++] = T_ARRAY;
2979       break;
2980     }
2981     default : ShouldNotReachHere();
2982     }
2983   }
2984 
2985   if (has_appendix) {
2986     sig_bt[cnt++] = T_OBJECT;
2987   }
2988 
2989   assert(cnt < 256, "grow table size");
2990 
2991   int comp_args_on_stack;
2992   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2993 
  // The calling convention doesn't count out_preserve_stack_slots, so
  // we must add that in to get "true" stack offsets.
2996 
2997   if (comp_args_on_stack) {
2998     for (int i = 0; i < cnt; i++) {
2999       VMReg reg1 = regs[i].first();
3000       if (reg1->is_stack()) {
3001         // Yuck
3002         reg1 = reg1->bias(out_preserve_stack_slots());
3003       }
3004       VMReg reg2 = regs[i].second();
3005       if (reg2->is_stack()) {
3006         // Yuck
3007         reg2 = reg2->bias(out_preserve_stack_slots());
3008       }
3009       regs[i].set_pair(reg2, reg1);
3010     }
3011   }
3012 
3013   // results
3014   *arg_size = cnt;
3015   return regs;
3016 }
3017 
3018 // OSR Migration Code
3019 //
// This code is used to convert interpreter frames into compiled frames.  It is
// called from the very start of a compiled OSR nmethod.  A temp array is
3022 // allocated to hold the interesting bits of the interpreter frame.  All
3023 // active locks are inflated to allow them to move.  The displaced headers and
3024 // active interpreter locals are copied into the temp buffer.  Then we return
3025 // back to the compiled code.  The compiled code then pops the current
3026 // interpreter frame off the stack and pushes a new compiled frame.  Then it
3027 // copies the interpreter locals and displaced headers where it wants.
3028 // Finally it calls back to free the temp buffer.
3029 //
3030 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3031 
3032 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
3033 
3034   //
  // This code is dependent on the memory layout of the interpreter local
  // array and the monitors.  On all of our platforms the layout is identical,
  // so this code is shared.  If some platform lays its arrays out
  // differently then this code could move to platform-specific code, or
  // the code here could be modified to copy items one at a time using
  // frame accessor methods and be platform independent.
3041 
3042   frame fr = thread->last_frame();
3043   assert(fr.is_interpreted_frame(), "");
  assert(fr.interpreter_frame_expression_stack_size() == 0, "only handle empty stacks");
3045 
3046   // Figure out how many monitors are active.
3047   int active_monitor_count = 0;
3048   for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3049        kptr < fr.interpreter_frame_monitor_begin();
3050        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3051     if (kptr->obj() != NULL) active_monitor_count++;
3052   }
3053 
  // QQQ we could place the number of active monitors in the array so that
  // compiled code could double-check it.
3056 
3057   Method* moop = fr.interpreter_frame_method();
3058   int max_locals = moop->max_locals();
3059   // Allocate temp buffer, 1 word per local & 2 per active monitor
3060   int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3061   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
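  // Resulting buffer layout, one word per slot:
  //   buf[0 .. max_locals-1] : the interpreter locals
  //   buf[max_locals .. ]    : a (displaced header, object) pair for each
  //                            active monitor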
3062 
3063   // Copy the locals.  Order is preserved so that loading of longs works.
3064   // Since there's no GC I can copy the oops blindly.
3065   assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3066   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3067                        (HeapWord*)&buf[0],
3068                        max_locals);
3069 
3070   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
3071   int i = max_locals;
3072   for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3073        kptr2 < fr.interpreter_frame_monitor_begin();
3074        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3075     if (kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
3076       BasicLock *lock = kptr2->lock();
3077       // Inflate so the displaced header becomes position-independent
3078       if (lock->displaced_header()->is_unlocked())
3079         ObjectSynchronizer::inflate_helper(kptr2->obj());
3080       // Now the displaced header is free to move
3081       buf[i++] = (intptr_t)lock->displaced_header();
3082       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3083     }
3084   }
3085   assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3086 
3087   return buf;
3088 JRT_END
3089 
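// Called from compiled code once migration is complete, to free the temp
// buffer allocated by OSR_migration_begin() above.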
3090 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3091   FREE_C_HEAP_ARRAY(intptr_t, buf);
3092 JRT_END
3093 
3094 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3095   AdapterHandlerTableIterator iter(_adapters);
3096   while (iter.has_next()) {
3097     AdapterHandlerEntry* a = iter.next();
3098     if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
3099   }
3100   return false;
3101 }
3102 
3103 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3104   AdapterHandlerTableIterator iter(_adapters);
3105   while (iter.has_next()) {
3106     AdapterHandlerEntry* a = iter.next();
3107     if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3108       st->print("Adapter for signature: ");
      a->print_adapter_on(st);
3110       return;
3111     }
3112   }
3113   assert(false, "Should have found handler");
3114 }
3115 
3116 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3117   st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
3118                p2i(this), fingerprint()->as_string(),
3119                p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_unverified_entry()));
3120 
3121 }
3122 
3123 #if INCLUDE_CDS
3124 
3125 void CDSAdapterHandlerEntry::init() {
3126   assert(DumpSharedSpaces, "used during dump time only");
3127   _c2i_entry_trampoline = (address)MetaspaceShared::misc_data_space_alloc(SharedRuntime::trampoline_size());
3128   _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_data_space_alloc(sizeof(AdapterHandlerEntry*));
3129 };
3130 
3131 #endif // INCLUDE_CDS
3132 
3133 
3134 #ifndef PRODUCT
3135 
3136 void AdapterHandlerLibrary::print_statistics() {
3137   _adapters->print_statistics();
3138 }
3139 
3140 #endif /* PRODUCT */
3141 
3142 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
3143   assert(thread->is_Java_thread(), "Only Java threads have a stack reserved zone");
3144   thread->enable_stack_reserved_zone();
3145   thread->set_reserved_stack_activation(thread->stack_base());
3146 JRT_END
3147 
3148 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
3149   frame activation;
3150   CompiledMethod* nm = NULL;
3151   int count = 1;
3152 
3153   assert(fr.is_java_frame(), "Must start on Java frame");
3154 
3155   while (true) {
3156     Method* method = NULL;
3157     if (fr.is_interpreted_frame()) {
3158       method = fr.interpreter_frame_method();
3159     } else {
3160       CodeBlob* cb = fr.cb();
3161       if (cb != NULL && cb->is_compiled()) {
3162         nm = cb->as_compiled_method();
3163         method = nm->method();
3164       }
3165     }
3166     if ((method != NULL) && method->has_reserved_stack_access()) {
3167       ResourceMark rm(thread);
3168       activation = fr;
3169       warning("Potentially dangerous stack overflow in "
3170               "ReservedStackAccess annotated method %s [%d]",
3171               method->name_and_sig_as_C_string(), count++);
3172       EventReservedStackActivation event;
3173       if (event.should_commit()) {
3174         event.set_method(method);
3175         event.commit();
3176       }
3177     }
3178     if (fr.is_first_java_frame()) {
3179       break;
3180     } else {
3181       fr = fr.java_sender();
3182     }
3183   }
3184   return activation;
3185 }
3186