1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/nmethod.hpp"
  30 #include "code/pcDesc.hpp"
  31 #include "code/scopeDesc.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/oopMapCache.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/oopFactory.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "oops/method.hpp"
  39 #include "oops/objArrayOop.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "oops/fieldStreams.hpp"
  42 #include "oops/verifyOopClosure.hpp"
  43 #include "prims/jvmtiThreadState.hpp"
  44 #include "runtime/biasedLocking.hpp"
  45 #include "runtime/compilationPolicy.hpp"
  46 #include "runtime/deoptimization.hpp"
  47 #include "runtime/interfaceSupport.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/signature.hpp"
  50 #include "runtime/stubRoutines.hpp"
  51 #include "runtime/thread.hpp"
  52 #include "runtime/vframe.hpp"
  53 #include "runtime/vframeArray.hpp"
  54 #include "runtime/vframe_hp.hpp"
  55 #include "utilities/events.hpp"
  56 #include "utilities/xmlstream.hpp"
  57 
  58 #if INCLUDE_JVMCI
  59 #include "jvmci/jvmciRuntime.hpp"
  60 #include "jvmci/jvmciJavaClasses.hpp"
  61 #endif
  62 
  63 
  64 bool DeoptimizationMarker::_is_active = false;
  65 
  66 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
  67                                          int  caller_adjustment,
  68                                          int  caller_actual_parameters,
  69                                          int  number_of_frames,
  70                                          intptr_t* frame_sizes,
  71                                          address* frame_pcs,
  72                                          BasicType return_type,
  73                                          int exec_mode) {
  74   _size_of_deoptimized_frame = size_of_deoptimized_frame;
  75   _caller_adjustment         = caller_adjustment;
  76   _caller_actual_parameters  = caller_actual_parameters;
  77   _number_of_frames          = number_of_frames;
  78   _frame_sizes               = frame_sizes;
  79   _frame_pcs                 = frame_pcs;
  80   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  81   _return_type               = return_type;
  82   _initial_info              = 0;
  83   // PD (x86 only)
  84   _counter_temp              = 0;
  85   _unpack_kind               = exec_mode;
  86   _sender_sp_temp            = 0;
  87 
  88   _total_frame_sizes         = size_of_frames();
  89   assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
  90 }
  91 
  92 
  93 Deoptimization::UnrollBlock::~UnrollBlock() {
  94   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  95   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  96   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
  97 }
  98 
  99 
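// Each register is given two intptr_t slots in _register_block (it is
// allocated in the constructor as RegisterMap::reg_count * 2), which is why
// the index is scaled by 2 below.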
 100 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
 101   assert(register_number < RegisterMap::reg_count, "checking register number");
 102   return &_register_block[register_number * 2];
 103 }
 104 
 105 
 106 
 107 int Deoptimization::UnrollBlock::size_of_frames() const {
 108   // Account first for the adjustment of the initial frame
 109   int result = _caller_adjustment;
 110   for (int index = 0; index < number_of_frames(); index++) {
 111     result += frame_sizes()[index];
 112   }
 113   return result;
 114 }
 115 
 116 
 117 void Deoptimization::UnrollBlock::print() {
 118   ttyLocker ttyl;
 119   tty->print_cr("UnrollBlock");
 120   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 121   tty->print(   "  frame_sizes: ");
 122   for (int index = 0; index < number_of_frames(); index++) {
 123     tty->print(INTX_FORMAT " ", frame_sizes()[index]);
 124   }
 125   tty->cr();
 126 }
 127 
 128 
 129 // In order to make fetch_unroll_info work properly with escape
 130 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
 131 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 132 // of previously eliminated objects occurs in realloc_objects, which is
 133 // called from the method fetch_unroll_info_helper below.
 134 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
 135   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 136   // but makes the entry a little slower. There is however a little dance we have to
 137   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 138 
 139   // fetch_unroll_info() is called at the beginning of the deoptimization
 140   // handler. Note this fact before we start generating temporary frames
 141   // that can confuse an asynchronous stack walker. This counter is
 142   // decremented at the end of unpack_frames().
 143   if (TraceDeoptimization) {
 144     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 145   }
 146   thread->inc_in_deopt_handler();
 147 
 148   return fetch_unroll_info_helper(thread, exec_mode);
 149 JRT_END
 150 
 151 
 152 // This is factored, since it is called both from a JRT_BLOCK_ENTRY (deoptimization) and a JRT_ENTRY (uncommon_trap)
 153 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
 154 
 155   // Note: there is a safepoint safety issue here. No matter whether we enter
 156   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 157   // the vframeArray is created.
 158   //
 159 
 160   // Allocate our special deoptimization ResourceMark
 161   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 162   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 163   thread->set_deopt_mark(dmark);
 164 
 165   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 166   RegisterMap map(thread, true);
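  // A second map that does not record callee-saved register locations; it is
  // only needed for the plain sender walks further down.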
 167   RegisterMap dummy_map(thread, false);
 168   // Now get the deoptee with a valid map
 169   frame deoptee = stub_frame.sender(&map);
 170   // Set the deoptee nmethod
 171   assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
 172   CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
 173   thread->set_deopt_compiled_method(cm);
 174 
 175   if (VerifyStack) {
 176     thread->validate_frame_layout();
 177   }
 178 
 179   // Create a growable array of VFrames where each VFrame represents an inlined
 180   // Java frame.  This storage is allocated with the usual system arena.
 181   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 182   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 183   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
 184   while (!vf->is_top()) {
 185     assert(vf->is_compiled_frame(), "Wrong frame type");
 186     chunk->push(compiledVFrame::cast(vf));
 187     vf = vf->sender();
 188   }
 189   assert(vf->is_compiled_frame(), "Wrong frame type");
 190   chunk->push(compiledVFrame::cast(vf));
 191 
 192   bool realloc_failures = false;
 193 
 194 #if defined(COMPILER2) || INCLUDE_JVMCI
 195   // Reallocate the non-escaping objects and restore their fields. Then
 196   // relock objects if synchronization on them was eliminated.
 197 #if !INCLUDE_JVMCI
 198   if (DoEscapeAnalysis || EliminateNestedLocks) {
 199     if (EliminateAllocations) {
 200 #endif // !INCLUDE_JVMCI
 201       assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
 202       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 203 
 204       // The flag return_oop() indicates call sites which return an oop
 205       // in compiled code. Such sites include Java method calls,
 206       // runtime calls (for example, used to allocate new objects/arrays
 207       // on slow code path) and any other calls generated in compiled code.
 208       // It is not guaranteed that we can get such information here only
 209       // by analyzing bytecode in deoptimized frames. This is why this flag
 210       // is set during method compilation (see Compile::Process_OopMap_Node()).
 211       // If the previous frame was popped or if we are dispatching an exception,
 212       // we don't have an oop result.
 213       bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
 214       Handle return_value;
 215       if (save_oop_result) {
 216         // Reallocation may trigger GC. If deoptimization happened on return from
 217         // a call which returns an oop, we need to save it since it is not in the oopmap.
 218         oop result = deoptee.saved_oop_result(&map);
 219         assert(result == NULL || result->is_oop(), "must be oop");
 220         return_value = Handle(thread, result);
 221         assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 222         if (TraceDeoptimization) {
 223           ttyLocker ttyl;
 224           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 225         }
 226       }
 227       if (objects != NULL) {
 228         JRT_BLOCK
 229           realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
 230         JRT_END
 231         bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
 232         reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 233 #ifndef PRODUCT
 234         if (TraceDeoptimization) {
 235           ttyLocker ttyl;
 236           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 237           print_objects(objects, realloc_failures);
 238         }
 239 #endif
 240       }
 241       if (save_oop_result) {
 242         // Restore result.
 243         deoptee.set_saved_oop_result(&map, return_value());
 244       }
 245 #if !INCLUDE_JVMCI
 246     }
 247     if (EliminateLocks) {
 248 #endif // !INCLUDE_JVMCI
 249 #ifndef PRODUCT
 250       bool first = true;
 251 #endif
 252       for (int i = 0; i < chunk->length(); i++) {
 253         compiledVFrame* cvf = chunk->at(i);
 254         assert(cvf->scope() != NULL, "expect only compiled java frames");
 255         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 256         if (monitors->is_nonempty()) {
 257           relock_objects(monitors, thread, realloc_failures);
 258 #ifndef PRODUCT
 259           if (PrintDeoptimizationDetails) {
 260             ttyLocker ttyl;
 261             for (int j = 0; j < monitors->length(); j++) {
 262               MonitorInfo* mi = monitors->at(j);
 263               if (mi->eliminated()) {
 264                 if (first) {
 265                   first = false;
 266                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 267                 }
 268                 if (mi->owner_is_scalar_replaced()) {
 269                   Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
 270                   tty->print_cr("     failed reallocation for klass %s", k->external_name());
 271                 } else {
 272                   tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
 273                 }
 274               }
 275             }
 276           }
 277 #endif // !PRODUCT
 278         }
 279       }
 280 #if !INCLUDE_JVMCI
 281     }
 282   }
 283 #endif // !INCLUDE_JVMCI
 284 #endif // COMPILER2 || INCLUDE_JVMCI
 285 
 286   ScopeDesc* trap_scope = chunk->at(0)->scope();
 287   Handle exceptionObject;
 288   if (trap_scope->rethrow_exception()) {
 289     if (PrintDeoptimizationDetails) {
 290       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 291     }
 292     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 293     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 294     ScopeValue* topOfStack = expressions->top();
 295     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 296     guarantee(exceptionObject() != NULL, "exception oop can not be null");
 297   }
 298 
 299   // Ensure that no safepoint is taken after pointers have been stored
 300   // in fields of rematerialized objects.  If a safepoint occurs from here on
 301   // out the java state residing in the vframeArray will be missed.
 302   NoSafepointVerifier no_safepoint;
 303 
 304   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
 305 #if defined(COMPILER2) || INCLUDE_JVMCI
 306   if (realloc_failures) {
 307     pop_frames_failed_reallocs(thread, array);
 308   }
 309 #endif
 310 
 311   assert(thread->vframe_array_head() == NULL, "Pending deopt!");
 312   thread->set_vframe_array_head(array);
 313 
 314   // Now that the vframeArray has been created, if we have any deferred local writes
 315   // added by jvmti then we can free up that structure as the data is now in the
 316   // vframeArray
 317 
 318   if (thread->deferred_locals() != NULL) {
 319     GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
 320     int i = 0;
 321     do {
 322       // Because of inlining we could have multiple vframes for a single frame
 323       // and several of the vframes could have deferred writes. Find them all.
 324       if (list->at(i)->id() == array->original().id()) {
 325         jvmtiDeferredLocalVariableSet* dlv = list->at(i);
 326         list->remove_at(i);
 327         // individual jvmtiDeferredLocalVariableSet are CHeapObj's
 328         delete dlv;
 329       } else {
 330         i++;
 331       }
 332     } while ( i < list->length() );
 333     if (list->length() == 0) {
 334       thread->set_deferred_locals(NULL);
 335       // free the list and elements back to C heap.
 336       delete list;
 337     }
 338 
 339   }
 340 
 341 #ifndef SHARK
 342   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 343   CodeBlob* cb = stub_frame.cb();
 344   // Verify we have the right vframeArray
 345   assert(cb->frame_size() >= 0, "Unexpected frame size");
 346   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 347 
 348   // If the deopt call site is a MethodHandle invoke call site we have
 349   // to adjust the unpack_sp.
 350   nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
 351   if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
 352     unpack_sp = deoptee.unextended_sp();
 353 
 354 #ifdef ASSERT
 355   assert(cb->is_deoptimization_stub() ||
 356          cb->is_uncommon_trap_stub() ||
 357          strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
 358          strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
 359          "unexpected code blob: %s", cb->name());
 360 #endif
 361 #else
 362   intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
 363 #endif // !SHARK
 364 
 365   // This is a guarantee instead of an assert because if the vframe doesn't match
 366   // we will unpack the wrong deoptimized frame and wind up in strange places
 367   // where it will be very difficult to figure out what went wrong. Better
 368   // to die an early death here than some very obscure death later when the
 369   // trail is cold.
 370   // Note: on ia64 this guarantee can be fooled by frames with no memory stack
 371   // in that it will fail to detect a problem when there is one. This needs
 372   // more work in tiger timeframe.
 373   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
 374 
 375   int number_of_frames = array->frames();
 376 
 377   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
 378   // virtual activation, which is the reverse of the elements in the vframes array.
 379   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
 380   // +1 because we always have an interpreter return address for the final slot.
 381   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
 382   int popframe_extra_args = 0;
 383   // Create an interpreter return address for the stub to use as its return
 384   // address so the skeletal frames are perfectly walkable
 385   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
 386 
 387   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
 388   // activation be put back on the expression stack of the caller for reexecution
 389   if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
 390     popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
 391   }
 392 
 393   // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
 394   // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
 395   // than simply use array->sender.pc(). This requires us to walk the current set of frames
 396   //
 397   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 398   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 399 
 400   // It's possible that the number of parameters at the call site is
 401   // different from the number of arguments in the callee when method
 402   // handles are used.  If the caller is interpreted, get the real
 403   // value so that the proper amount of space can be added to its
 404   // frame.
 405   bool caller_was_method_handle = false;
 406   if (deopt_sender.is_interpreted_frame()) {
 407     methodHandle method = deopt_sender.interpreter_frame_method();
 408     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
 409     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
 410       // Method handle invokes may involve fairly arbitrary chains of
 411       // calls so it's impossible to know how much actual space the
 412       // caller has for locals.
 413       caller_was_method_handle = true;
 414     }
 415   }
 416 
 417   //
 418   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 419   // frame_sizes/frame_pcs[1] next oldest frame (int)
 420   // frame_sizes/frame_pcs[n] youngest frame (int)
 421   //
 422   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
 423   // owns the space for the return address to its caller).  Confusing, ain't it?
 424   //
 425   // The vframe array can address vframes with indices running from
 426   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
 427   // When we create the skeletal frames we need the oldest frame to be in the zero slot
 428   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
 429   // so things look a little strange in this loop.
 430   //
 431   int callee_parameters = 0;
 432   int callee_locals = 0;
 433   for (int index = 0; index < array->frames(); index++ ) {
 434     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 435     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 436     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
 437     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
 438                                                                                                     callee_locals,
 439                                                                                                     index == 0,
 440                                                                                                     popframe_extra_args);
 441     // This pc doesn't have to be perfect, just good enough to identify the frame
 442     // as interpreted so the skeleton frame will be walkable.
 443     // The correct pc will be set when the skeleton frame is completely filled out.
 444     // The final pc we store in the loop is wrong and will be overwritten below.
 445     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
 446 
 447     callee_parameters = array->element(index)->method()->size_of_parameters();
 448     callee_locals = array->element(index)->method()->max_locals();
 449     popframe_extra_args = 0;
 450   }
 451 
 452   // Compute whether the root vframe returns a float or double value.
 453   BasicType return_type;
 454   {
 455     methodHandle method(thread, array->element(0)->method());
 456     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 457     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 458   }
 459 
 460   // Compute information for handling adapters and adjusting the frame size of the caller.
 461   int caller_adjustment = 0;
 462 
 463   // Compute the amount the oldest interpreter frame will have to adjust
 464   // its caller's stack by. If the caller is a compiled frame then
 465   // we pretend that the callee has no parameters so that the
 466   // extension counts for the full amount of locals and not just
 467   // locals-parms. This is because without a c2i adapter the parm
 468   // area as created by the compiled frame will not be usable by
 469   // the interpreter. (Depending on the calling convention there
 470   // may not even be enough space).
 471 
 472   // QQQ I'd rather see this pushed down into last_frame_adjust
 473   // and have it take the sender (aka caller).
 474 
 475   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 476     caller_adjustment = last_frame_adjust(0, callee_locals);
 477   } else if (callee_locals > callee_parameters) {
 478     // The caller frame may need extending to accommodate
 479     // non-parameter locals of the first unpacked interpreted frame.
 480     // Compute that adjustment.
 481     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 482   }
 483 
 484   // If the sender is deoptimized, we must retrieve the address of the handler
 485   // since the frame will "magically" show the original pc before the deopt
 486   // and we'd undo the deopt.
 487 
 488   frame_pcs[0] = deopt_sender.raw_pc();
 489 
 490 #ifndef SHARK
 491   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 492 #endif // !SHARK
 493 
 494 #if INCLUDE_JVMCI
 495   if (exceptionObject() != NULL) {
 496     thread->set_exception_oop(exceptionObject());
 497     exec_mode = Unpack_exception;
 498   }
 499 #endif
 500 
 501   if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 502     assert(thread->has_pending_exception(), "should have thrown OOME");
 503     thread->set_exception_oop(thread->pending_exception());
 504     thread->clear_pending_exception();
 505     exec_mode = Unpack_exception;
 506   }
 507 
 508 #if INCLUDE_JVMCI
 509   if (thread->frames_to_pop_failed_realloc() > 0) {
 510     thread->set_pending_monitorenter(false);
 511   }
 512 #endif
 513 
 514   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 515                                       caller_adjustment * BytesPerWord,
 516                                       caller_was_method_handle ? 0 : callee_parameters,
 517                                       number_of_frames,
 518                                       frame_sizes,
 519                                       frame_pcs,
 520                                       return_type,
 521                                       exec_mode);
 522   // On some platforms, we need a way to pass some platform dependent
 523   // information to the unpacking code so the skeletal frames come out
 524   // correct (initial fp value, unextended sp, ...)
 525   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 526 
 527   if (array->frames() > 1) {
 528     if (VerifyStack && TraceDeoptimization) {
 529       ttyLocker ttyl;
 530       tty->print_cr("Deoptimizing method containing inlining");
 531     }
 532   }
 533 
 534   array->set_unroll_block(info);
 535   return info;
 536 }
 537 
 538 // Called to clean up deoptimization data structures, both in the normal case
 539 // after unpacking to the stack and when a stack overflow error occurs
 540 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 541                                         vframeArray *array) {
 542 
 543   // Get array if coming from exception
 544   if (array == NULL) {
 545     array = thread->vframe_array_head();
 546   }
 547   thread->set_vframe_array_head(NULL);
 548 
 549   // Free the previous UnrollBlock
 550   vframeArray* old_array = thread->vframe_array_last();
 551   thread->set_vframe_array_last(array);
 552 
 553   if (old_array != NULL) {
 554     UnrollBlock* old_info = old_array->unroll_block();
 555     old_array->set_unroll_block(NULL);
 556     delete old_info;
 557     delete old_array;
 558   }
 559 
 560   // Deallocate any resources created in this routine and any ResourceObjs allocated
 561   // inside the vframeArray (StackValueCollections)
 562 
 563   delete thread->deopt_mark();
 564   thread->set_deopt_mark(NULL);
 565   thread->set_deopt_compiled_method(NULL);
 566 
 567 
 568   if (JvmtiExport::can_pop_frame()) {
 569 #ifndef CC_INTERP
 570     // Regardless of whether we entered this routine with the pending
 571     // popframe condition bit set, we should always clear it now
 572     thread->clear_popframe_condition();
 573 #else
 574     // C++ interpreter will clear has_pending_popframe when it enters
 575     // with method_resume. For deopt_resume2 we clear it now.
 576     if (thread->popframe_forcing_deopt_reexecution())
 577         thread->clear_popframe_condition();
 578 #endif /* CC_INTERP */
 579   }
 580 
 581   // unpack_frames() is called at the end of the deoptimization handler
 582   // and (in C2) at the end of the uncommon trap handler. Note this fact
 583   // so that an asynchronous stack walker can work again. This counter is
 584   // incremented at the beginning of fetch_unroll_info() and (in C2) at
 585   // the beginning of uncommon_trap().
 586   thread->dec_in_deopt_handler();
 587 }
 588 
 589 // Moved from cpu directories because none of the cpus has callee save values.
 590 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
 591 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 592 
 593   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
 594   // the days we had adapter frames. When we deoptimize a situation where a
 595   // compiled caller calls a compiled callee, the caller will have registers it expects
 596   // to survive the call to the callee. If we deoptimize the callee, the only
 597   // way we can restore these registers is to have the oldest interpreter
 598   // frame that we create restore these values. That is what this routine
 599   // will accomplish.
 600 
 601   // At the moment we have modified c2 to not have any callee save registers
 602   // so this problem does not exist and this routine is just a placeholder.
 603 
 604   assert(f->is_interpreted_frame(), "must be interpreted");
 605 }
 606 
 607 // Return BasicType of value being returned
 608 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 609 
 610   // We are already active in the special DeoptResourceMark; any ResourceObjs we
 611   // allocate will be freed at the end of the routine.
 612 
 613   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 614   // but makes the entry a little slower. There is however a little dance we have to
 615   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 616   ResetNoHandleMark rnhm; // No-op in release/product versions
 617   HandleMark hm;
 618 
 619   frame stub_frame = thread->last_frame();
 620 
 621   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 622   // must point to the vframeArray for the unpack frame.
 623   vframeArray* array = thread->vframe_array_head();
 624 
 625 #ifndef PRODUCT
 626   if (TraceDeoptimization) {
 627     ttyLocker ttyl;
 628     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 629                   p2i(thread), p2i(array), exec_mode);
 630   }
 631 #endif
 632   Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 633               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 634 
 635   UnrollBlock* info = array->unroll_block();
 636 
 637   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 638   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
 639 
 640   BasicType bt = info->return_type();
 641 
 642   // If we have an exception pending, claim that the return type is an oop
 643   // so the deopt_blob does not overwrite the exception_oop.
 644 
 645   if (exec_mode == Unpack_exception)
 646     bt = T_OBJECT;
 647 
 648   // Cleanup thread deopt data
 649   cleanup_deopt_info(thread, array);
 650 
 651 #ifndef PRODUCT
 652   if (VerifyStack) {
 653     ResourceMark res_mark;
 654 
 655     thread->validate_frame_layout();
 656 
 657     // Verify that the just-unpacked frames match the interpreter's
 658     // notions of expression stack and locals
 659     vframeArray* cur_array = thread->vframe_array_last();
 660     RegisterMap rm(thread, false);
 661     rm.set_include_argument_oops(false);
 662     bool is_top_frame = true;
 663     int callee_size_of_parameters = 0;
 664     int callee_max_locals = 0;
 665     for (int i = 0; i < cur_array->frames(); i++) {
 666       vframeArrayElement* el = cur_array->element(i);
 667       frame* iframe = el->iframe();
 668       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
 669 
 670       // Get the oop map for this bci
 671       InterpreterOopMap mask;
 672       int cur_invoke_parameter_size = 0;
 673       bool try_next_mask = false;
 674       int next_mask_expression_stack_size = -1;
 675       int top_frame_expression_stack_adjustment = 0;
 676       methodHandle mh(thread, iframe->interpreter_frame_method());
 677       OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
 678       BytecodeStream str(mh);
 679       str.set_start(iframe->interpreter_frame_bci());
 680       int max_bci = mh->code_size();
 681       // Get to the next bytecode if possible
 682       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
 683       // Check to see if we can grab the number of outgoing arguments
 684       // at an uncommon trap for an invoke (where the compiler
 685       // generates debug info before the invoke has executed)
 686       Bytecodes::Code cur_code = str.next();
 687       if (cur_code == Bytecodes::_invokevirtual   ||
 688           cur_code == Bytecodes::_invokespecial   ||
 689           cur_code == Bytecodes::_invokestatic    ||
 690           cur_code == Bytecodes::_invokeinterface ||
 691           cur_code == Bytecodes::_invokedynamic) {
 692         Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
 693         Symbol* signature = invoke.signature();
 694         ArgumentSizeComputer asc(signature);
 695         cur_invoke_parameter_size = asc.size();
 696         if (invoke.has_receiver()) {
 697           // Add in receiver
 698           ++cur_invoke_parameter_size;
 699         }
 700         if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
 701           callee_size_of_parameters++;
 702         }
 703       }
 704       if (str.bci() < max_bci) {
 705         Bytecodes::Code bc = str.next();
 706         if (bc >= 0) {
 707           // The interpreter oop map generator reports results before
 708           // the current bytecode has executed except in the case of
 709           // calls. It seems to be hard to tell whether the compiler
 710           // has emitted debug information matching the "state before"
 711           // a given bytecode or the state after, so we try both
 712           switch (cur_code) {
 713             case Bytecodes::_invokevirtual:
 714             case Bytecodes::_invokespecial:
 715             case Bytecodes::_invokestatic:
 716             case Bytecodes::_invokeinterface:
 717             case Bytecodes::_invokedynamic:
 718             case Bytecodes::_athrow:
 719               break;
 720             default: {
 721               InterpreterOopMap next_mask;
 722               OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
 723               next_mask_expression_stack_size = next_mask.expression_stack_size();
 724               // Need to subtract off the size of the result type of
 725               // the bytecode because this is not described in the
 726               // debug info but returned to the interpreter in the TOS
 727               // caching register
 728               BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
 729               if (bytecode_result_type != T_ILLEGAL) {
 730                 top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
 731               }
 732               assert(top_frame_expression_stack_adjustment >= 0, "");
 733               try_next_mask = true;
 734               break;
 735             }
 736           }
 737         }
 738       }
 739 
 740       // Verify stack depth and oops in frame
 741       // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
 742       if (!(
 743             /* SPARC */
 744             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
 745             /* x86 */
 746             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
 747             (try_next_mask &&
 748              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
 749                                                                     top_frame_expression_stack_adjustment))) ||
 750             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
 751             (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
 752              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
 753             )) {
 754         ttyLocker ttyl;
 755 
 756         // Print out some information that will help us debug the problem
 757         tty->print_cr("Wrong number of expression stack elements during deoptimization");
 758         tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
 759         tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
 760                       iframe->interpreter_frame_expression_stack_size());
 761         tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
 762         tty->print_cr("  try_next_mask = %d", try_next_mask);
 763         tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
 764         tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
 765         tty->print_cr("  callee_max_locals = %d", callee_max_locals);
 766         tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
 767         tty->print_cr("  exec_mode = %d", exec_mode);
 768         tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
 769         tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
 770         tty->print_cr("  Interpreted frames:");
 771         for (int k = 0; k < cur_array->frames(); k++) {
 772           vframeArrayElement* el = cur_array->element(k);
 773           tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
 774         }
 775         cur_array->print_on_2(tty);
 776         guarantee(false, "wrong number of expression stack elements during deopt");
 777       }
 778       VerifyOopClosure verify;
 779       iframe->oops_interpreted_do(&verify, &rm, false);
 780       callee_size_of_parameters = mh->size_of_parameters();
 781       callee_max_locals = mh->max_locals();
 782       is_top_frame = false;
 783     }
 784   }
 785 #endif /* !PRODUCT */
 786 
 787 
 788   return bt;
 789 JRT_END
 790 
 791 
 792 int Deoptimization::deoptimize_dependents() {
 793   Threads::deoptimized_wrt_marked_nmethods();
 794   return 0;
 795 }
 796 
 797 Deoptimization::DeoptAction Deoptimization::_unloaded_action
 798   = Deoptimization::Action_reinterpret;
 799 
 800 #if defined(COMPILER2) || INCLUDE_JVMCI
 801 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
 802   Handle pending_exception(THREAD, thread->pending_exception());
 803   const char* exception_file = thread->exception_file();
 804   int exception_line = thread->exception_line();
 805   thread->clear_pending_exception();
 806 
 807   bool failures = false;
 808 
 809   for (int i = 0; i < objects->length(); i++) {
 810     assert(objects->at(i)->is_object(), "invalid debug information");
 811     ObjectValue* sv = (ObjectValue*) objects->at(i);
 812 
 813     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 814     oop obj = NULL;
 815 
 816     if (k->is_instance_klass()) {
 817       InstanceKlass* ik = InstanceKlass::cast(k);
 818       obj = ik->allocate_instance(THREAD);
 819     } else if (k->is_typeArray_klass()) {
 820       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
 821       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
 822       int len = sv->field_size() / type2size[ak->element_type()];
 823       obj = ak->allocate(len, THREAD);
 824     } else if (k->is_objArray_klass()) {
 825       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
 826       obj = ak->allocate(sv->field_size(), THREAD);
 827     }
 828 
 829     if (obj == NULL) {
 830       failures = true;
 831     }
 832 
 833     assert(sv->value().is_null(), "redundant reallocation");
 834     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
 835     CLEAR_PENDING_EXCEPTION;
 836     sv->set_value(obj);
 837   }
 838 
 839   if (failures) {
 840     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
 841   } else if (pending_exception.not_null()) {
 842     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
 843   }
 844 
 845   return failures;
 846 }
 847 
 848 // restore elements of an eliminated type array
 849 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
 850   int index = 0;
 851   intptr_t val;
 852 
 853   for (int i = 0; i < sv->field_size(); i++) {
 854     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
 855     switch(type) {
 856     case T_LONG: case T_DOUBLE: {
 857       assert(value->type() == T_INT, "Agreement.");
 858       StackValue* low =
 859         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
 860 #ifdef _LP64
 861       jlong res = (jlong)low->get_int();
 862 #else
 863 #ifdef SPARC
 864       // For SPARC we have to swap high and low words.
 865       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
 866 #else
 867       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
 868 #endif //SPARC
 869 #endif
 870       obj->long_at_put(index, res);
 871       break;
 872     }
 873 
 874     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
 875     case T_INT: case T_FLOAT: { // 4 bytes.
 876       assert(value->type() == T_INT, "Agreement.");
 877       bool big_value = false;
 878       if (i + 1 < sv->field_size() && type == T_INT) {
 879         if (sv->field_at(i)->is_location()) {
 880           Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
 881           if (type == Location::dbl || type == Location::lng) {
 882             big_value = true;
 883           }
 884         } else if (sv->field_at(i)->is_constant_int()) {
 885           ScopeValue* next_scope_field = sv->field_at(i + 1);
 886           if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
 887             big_value = true;
 888           }
 889         }
 890       }
 891 
 892       if (big_value) {
 893         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
 894   #ifdef _LP64
 895         jlong res = (jlong)low->get_int();
 896   #else
 897   #ifdef SPARC
 898         // For SPARC we have to swap high and low words.
 899         jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
 900   #else
 901         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
 902   #endif //SPARC
 903   #endif
 904         obj->int_at_put(index, (jint)*((jint*)&res));
 905         obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
 906       } else {
 907         val = value->get_int();
 908         obj->int_at_put(index, (jint)*((jint*)&val));
 909       }
 910       break;
 911     }
 912 
 913     case T_SHORT:
 914       assert(value->type() == T_INT, "Agreement.");
 915       val = value->get_int();
 916       obj->short_at_put(index, (jshort)*((jint*)&val));
 917       break;
 918 
 919     case T_CHAR:
 920       assert(value->type() == T_INT, "Agreement.");
 921       val = value->get_int();
 922       obj->char_at_put(index, (jchar)*((jint*)&val));
 923       break;
 924 
 925     case T_BYTE:
 926       assert(value->type() == T_INT, "Agreement.");
 927       val = value->get_int();
 928       obj->byte_at_put(index, (jbyte)*((jint*)&val));
 929       break;
 930 
 931     case T_BOOLEAN:
 932       assert(value->type() == T_INT, "Agreement.");
 933       val = value->get_int();
 934       obj->bool_at_put(index, (jboolean)*((jint*)&val));
 935       break;
 936 
 937     default:
 938       ShouldNotReachHere();
 939     }
 940     index++;
 941   }
 942 }
 943 
 944 
 945 // restore elements of an eliminated object array
 946 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
 947   for (int i = 0; i < sv->field_size(); i++) {
 948     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
 949     assert(value->type() == T_OBJECT, "object element expected");
 950     obj->obj_at_put(i, value->get_obj()());
 951   }
 952 }
 953 
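// Small helper pairing a field's offset with its BasicType; used below when
// reassigning the fields of reallocated instance objects.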
 954 class ReassignedField {
 955 public:
 956   int _offset;
 957   BasicType _type;
 958 public:
 959   ReassignedField() {
 960     _offset = 0;
 961     _type = T_ILLEGAL;
 962   }
 963 };
 964 
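// Comparator passed to GrowableArray::sort below: orders ReassignedFields by
// increasing field offset.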
 965 int compare(ReassignedField* left, ReassignedField* right) {
 966   return left->_offset - right->_offset;
 967 }
 968 
 969 // Restore fields of an eliminated instance object using the same field order
 970 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
 971 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
 972   if (klass->superklass() != NULL) {
 973     svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
 974   }
 975 
 976   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
 977   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
 978     if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
 979       ReassignedField field;
 980       field._offset = fs.offset();
 981       field._type = FieldType::basic_type(fs.signature());
 982       fields->append(field);
 983     }
 984   }
 985   fields->sort(compare);
 986   for (int i = 0; i < fields->length(); i++) {
 987     intptr_t val;
 988     ScopeValue* scope_field = sv->field_at(svIndex);
 989     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
 990     int offset = fields->at(i)._offset;
 991     BasicType type = fields->at(i)._type;
 992     switch (type) {
 993       case T_OBJECT: case T_ARRAY:
 994         assert(value->type() == T_OBJECT, "Agreement.");
 995         obj->obj_field_put(offset, value->get_obj()());
 996         break;
 997 
 998       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
 999       case T_INT: case T_FLOAT: { // 4 bytes.
1000         assert(value->type() == T_INT, "Agreement.");
1001         bool big_value = false;
1002         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1003           if (scope_field->is_location()) {
1004             Location::Type type = ((LocationValue*) scope_field)->location().type();
1005             if (type == Location::dbl || type == Location::lng) {
1006               big_value = true;
1007             }
1008           }
1009           if (scope_field->is_constant_int()) {
1010             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1011             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1012               big_value = true;
1013             }
1014           }
1015         }
1016 
1017         if (big_value) {
1018           i++;
1019           assert(i < fields->length(), "second T_INT field needed");
1020           assert(fields->at(i)._type == T_INT, "T_INT field needed");
1021         } else {
1022           val = value->get_int();
1023           obj->int_field_put(offset, (jint)*((jint*)&val));
1024           break;
1025         }
1026       }
1027         /* no break */
1028 
1029       case T_LONG: case T_DOUBLE: {
1030         assert(value->type() == T_INT, "Agreement.");
1031         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
1032 #ifdef _LP64
1033         jlong res = (jlong)low->get_int();
1034 #else
1035 #ifdef SPARC
1036         // For SPARC we have to swap high and low words.
1037         jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1038 #else
1039         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1040 #endif //SPARC
1041 #endif
1042         obj->long_field_put(offset, res);
1043         break;
1044       }
1045 
1046       case T_SHORT:
1047         assert(value->type() == T_INT, "Agreement.");
1048         val = value->get_int();
1049         obj->short_field_put(offset, (jshort)*((jint*)&val));
1050         break;
1051 
1052       case T_CHAR:
1053         assert(value->type() == T_INT, "Agreement.");
1054         val = value->get_int();
1055         obj->char_field_put(offset, (jchar)*((jint*)&val));
1056         break;
1057 
1058       case T_BYTE:
1059         assert(value->type() == T_INT, "Agreement.");
1060         val = value->get_int();
1061         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1062         break;
1063 
1064       case T_BOOLEAN:
1065         assert(value->type() == T_INT, "Agreement.");
1066         val = value->get_int();
1067         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1068         break;
1069 
1070       default:
1071         ShouldNotReachHere();
1072     }
1073     svIndex++;
1074   }
1075   return svIndex;
1076 }
1077 
1078 // restore fields of all eliminated objects and arrays
1079 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1080   for (int i = 0; i < objects->length(); i++) {
1081     ObjectValue* sv = (ObjectValue*) objects->at(i);
1082     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1083     Handle obj = sv->value();
1084     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1085     if (PrintDeoptimizationDetails) {
1086       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1087     }
1088     if (obj.is_null()) {
1089       continue;
1090     }
1091 
1092     if (k->is_instance_klass()) {
1093       InstanceKlass* ik = InstanceKlass::cast(k);
1094       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1095     } else if (k->is_typeArray_klass()) {
1096       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1097       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1098     } else if (k->is_objArray_klass()) {
1099       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1100     }
1101   }
1102 }
1103 
1104 
1105 // relock objects for which synchronization was eliminated
1106 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
1107   for (int i = 0; i < monitors->length(); i++) {
1108     MonitorInfo* mon_info = monitors->at(i);
1109     if (mon_info->eliminated()) {
1110       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1111       if (!mon_info->owner_is_scalar_replaced()) {
1112         Handle obj(thread, mon_info->owner());
1113         markOop mark = obj->mark();
1114         if (UseBiasedLocking && mark->has_bias_pattern()) {
1115           // Newly allocated objects may have the mark set to anonymously biased.
1116           // Also the deoptimized method may have called methods with synchronization
1117           // where the thread-local object is biased-locked to the current thread.
1118           assert(mark->is_biased_anonymously() ||
1119                  mark->biased_locker() == thread, "should be locked to current thread");
1120           // Reset mark word to unbiased prototype.
1121           markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
1122           obj->set_mark(unbiased_prototype);
1123         }
1124         BasicLock* lock = mon_info->lock();
1125         ObjectSynchronizer::slow_enter(obj, lock, thread);
1126         assert(mon_info->owner()->is_locked(), "object must be locked now");
1127       }
1128     }
1129   }
1130 }
1131 
1132 
1133 #ifndef PRODUCT
1134 // print information about reallocated objects
1135 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
1136   fieldDescriptor fd;
1137 
1138   for (int i = 0; i < objects->length(); i++) {
1139     ObjectValue* sv = (ObjectValue*) objects->at(i);
1140     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1141     Handle obj = sv->value();
1142 
1143     tty->print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
1144     k->print_value();
1145     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1146     if (obj.is_null()) {
1147       tty->print(" allocation failed");
1148     } else {
1149       tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
1150     }
1151     tty->cr();
1152 
1153     if (Verbose && !obj.is_null()) {
1154       k->oop_print_on(obj(), tty);
1155     }
1156   }
1157 }
1158 #endif
1159 #endif // COMPILER2 || INCLUDE_JVMCI
1160 
1161 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1162   Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1163 
1164 #ifndef PRODUCT
1165   if (PrintDeoptimizationDetails) {
1166     ttyLocker ttyl;
1167     tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1168     fr.print_on(tty);
1169     tty->print_cr("     Virtual frames (innermost first):");
1170     for (int index = 0; index < chunk->length(); index++) {
1171       compiledVFrame* vf = chunk->at(index);
1172       tty->print("       %2d - ", index);
1173       vf->print_value();
1174       int bci = chunk->at(index)->raw_bci();
1175       const char* code_name;
1176       if (bci == SynchronizationEntryBCI) {
1177         code_name = "sync entry";
1178       } else {
1179         Bytecodes::Code code = vf->method()->code_at(bci);
1180         code_name = Bytecodes::name(code);
1181       }
1182       tty->print(" - %s", code_name);
1183       tty->print_cr(" @ bci %d ", bci);
1184       if (Verbose) {
1185         vf->print();
1186         tty->cr();
1187       }
1188     }
1189   }
1190 #endif
1191 
1192   // Register map for next frame (used for stack crawl).  We capture
1193   // the state of the deopt'ing frame's caller.  Thus if we need to
1194   // stuff a C2I adapter we can properly fill in the callee-save
1195   // register locations.
1196   frame caller = fr.sender(reg_map);
1197   int frame_size = caller.sp() - fr.sp();
1198 
1199   frame sender = caller;
1200 
1201   // Since the Java thread being deoptimized will eventually adjust its own stack,
1202   // the vframeArray containing the unpacking information is allocated in the C heap.
1203   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1204   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1205 
1206   // Compare the vframeArray to the collected vframes
1207   assert(array->structural_compare(thread, chunk), "just checking");
1208 
1209 #ifndef PRODUCT
1210   if (PrintDeoptimizationDetails) {
1211     ttyLocker ttyl;
1212     tty->print_cr("     Created vframeArray " INTPTR_FORMAT, p2i(array));
1213   }
1214 #endif // PRODUCT
1215 
1216   return array;
1217 }
1218 
1219 #if defined(COMPILER2) || INCLUDE_JVMCI
1220 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1221   // Reallocation of some scalar replaced objects failed. Record
1222   // that we need to pop all the interpreter frames for the
1223   // deoptimized compiled frame.
1224   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1225   thread->set_frames_to_pop_failed_realloc(array->frames());
1226   // Unlock all monitors here otherwise the interpreter will see a
1227   // mix of locked and unlocked monitors (because of failed
1228   // reallocations of synchronized objects) and be confused.
1229   for (int i = 0; i < array->frames(); i++) {
1230     MonitorChunk* monitors = array->element(i)->monitors();
1231     if (monitors != NULL) {
1232       for (int j = 0; j < monitors->number_of_monitors(); j++) {
1233         BasicObjectLock* src = monitors->at(j);
1234         if (src->obj() != NULL) {
1235           ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
1236         }
1237       }
1238       array->element(i)->free_monitors(thread);
1239 #ifdef ASSERT
1240       array->element(i)->set_removed_monitors();
1241 #endif
1242     }
1243   }
1244 }
1245 #endif
1246 
1247 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
1248   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
1249   Thread* thread = Thread::current();
1250   for (int i = 0; i < monitors->length(); i++) {
1251     MonitorInfo* mon_info = monitors->at(i);
1252     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1253       objects_to_revoke->append(Handle(thread, mon_info->owner()));
1254     }
1255   }
1256 }
1257 
1258 
1259 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1260   if (!UseBiasedLocking) {
1261     return;
1262   }
1263 
1264   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1265 
1266   // Unfortunately we don't have a RegisterMap available in most of
1267   // the places we want to call this routine so we need to walk the
1268   // stack again to update the register map.
1269   if (map == NULL || !map->update_map()) {
1270     StackFrameStream sfs(thread, true);
1271     bool found = false;
1272     while (!found && !sfs.is_done()) {
1273       frame* cur = sfs.current();
1274       sfs.next();
1275       found = cur->id() == fr.id();
1276     }
1277     assert(found, "frame to be deoptimized not found on target thread's stack");
1278     map = sfs.register_map();
1279   }
1280 
1281   vframe* vf = vframe::new_vframe(&fr, map, thread);
1282   compiledVFrame* cvf = compiledVFrame::cast(vf);
1283   // Revoke monitors' biases in all scopes
1284   while (!cvf->is_top()) {
1285     collect_monitors(cvf, objects_to_revoke);
1286     cvf = compiledVFrame::cast(cvf->sender());
1287   }
1288   collect_monitors(cvf, objects_to_revoke);
1289 
1290   if (SafepointSynchronize::is_at_safepoint()) {
1291     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1292   } else {
1293     BiasedLocking::revoke(objects_to_revoke);
1294   }
1295 }
1296 
1297 
1298 void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
1299   if (!UseBiasedLocking) {
1300     return;
1301   }
1302 
1303   assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
1304   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1305   for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
1306     if (jt->has_last_Java_frame()) {
1307       StackFrameStream sfs(jt, true);
1308       while (!sfs.is_done()) {
1309         frame* cur = sfs.current();
1310         if (cb->contains(cur->pc())) {
1311           vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
1312           compiledVFrame* cvf = compiledVFrame::cast(vf);
1313           // Revoke monitors' biases in all scopes
1314           while (!cvf->is_top()) {
1315             collect_monitors(cvf, objects_to_revoke);
1316             cvf = compiledVFrame::cast(cvf->sender());
1317           }
1318           collect_monitors(cvf, objects_to_revoke);
1319         }
1320         sfs.next();
1321       }
1322     }
1323   }
1324   BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1325 }
1326 
1327 
1328 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1329   assert(fr.can_be_deoptimized(), "checking frame type");
1330 
1331   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1332 
1333   if (LogCompilation && xtty != NULL) {
1334     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1335     assert(cm != NULL, "only compiled methods can deopt");
1336 
1337     ttyLocker ttyl;
1338     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1339     cm->log_identity(xtty);
1340     xtty->end_head();
1341     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1342       xtty->begin_elem("jvms bci='%d'", sd->bci());
1343       xtty->method(sd->method());
1344       xtty->end_elem();
1345       if (sd->is_top())  break;
1346     }
1347     xtty->tail("deoptimized");
1348   }
1349 
1350   // Patch the compiled method so that when execution returns to it we will
1351   // deopt the execution state and return to the interpreter.
1352   fr.deoptimize(thread);
1353 }
1354 
1355 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1356   deoptimize(thread, fr, map, Reason_constraint);
1357 }
1358 
1359 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1360   // Deoptimize only if the frame comes from compiled code.
1361   // Do not deoptimize a frame that has already been patched
1362   // during the execution of the loops below.
1363   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1364     return;
1365   }
1366   ResourceMark rm;
1367   DeoptimizationMarker dm;
1368   if (UseBiasedLocking) {
1369     revoke_biases_of_monitors(thread, fr, map);
1370   }
1371   deoptimize_single_frame(thread, fr, reason);
1372 
1373 }
1374 
1375 
1376 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1377   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
1378          "can only deoptimize other thread at a safepoint");
1379   // Compute frame and register map based on thread and sp.
1380   RegisterMap reg_map(thread, UseBiasedLocking);
1381   frame fr = thread->last_frame();
1382   while (fr.id() != id) {
1383     fr = fr.sender(&reg_map);
1384   }
1385   deoptimize(thread, fr, &reg_map, reason);
1386 }
1387 
1388 
1389 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1390   if (thread == Thread::current()) {
1391     Deoptimization::deoptimize_frame_internal(thread, id, reason);
1392   } else {
1393     VM_DeoptimizeFrame deopt(thread, id, reason);
1394     VMThread::execute(&deopt);
1395   }
1396 }
1397 
1398 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1399   deoptimize_frame(thread, id, Reason_constraint);
1400 }
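// Illustrative usage (a sketch, not lifted from a specific caller): code that
// wants a compiled activation to fall back to the interpreter hands over the
// frame's id; if the target is another thread the request is routed through
// the VM_DeoptimizeFrame VM operation above.
//
//   frame fr = target_thread->last_frame();   // some compiled frame of interest
//   Deoptimization::deoptimize_frame(target_thread, fr.id());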
1401 
1402 // JVMTI PopFrame support
1403 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1404 {
1405   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1406 }
1407 JRT_END
1408 
1409 MethodData*
1410 Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
1411                                 bool create_if_missing) {
1412   Thread* THREAD = thread;
1413   MethodData* mdo = m()->method_data();
1414   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1415     // Build an MDO.  Ignore errors like OutOfMemory;
1416     // that simply means we won't have an MDO to update.
1417     Method::build_interpreter_method_data(m, THREAD);
1418     if (HAS_PENDING_EXCEPTION) {
1419       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1420       CLEAR_PENDING_EXCEPTION;
1421     }
1422     mdo = m()->method_data();
1423   }
1424   return mdo;
1425 }
1426 
1427 #if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
1428 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
1429   // in case of an unresolved klass entry, load the class.
1430   if (constant_pool->tag_at(index).is_unresolved_klass()) {
1431     Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
1432     return;
1433   }
1434 
1435   if (!constant_pool->tag_at(index).is_symbol()) return;
1436 
1437   Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
1438   Symbol*  symbol  = constant_pool->symbol_at(index);
1439 
1440   // class name?
1441   if (symbol->byte_at(0) != '(') {
1442     Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1443     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1444     return;
1445   }
1446 
1447   // then it must be a signature!
1448   ResourceMark rm(THREAD);
1449   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1450     if (ss.is_object()) {
1451       Symbol* class_name = ss.as_symbol(CHECK);
1452       Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1453       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1454     }
1455   }
1456 }
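// Illustrative example of the dispatch above: an unresolved klass entry such
// as "java/lang/String" is resolved directly as a class name, while a symbol
// beginning with '(' such as "(Ljava/util/List;)V" is treated as a method
// signature and each object type it mentions (here java.util.List) is
// resolved in turn.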
1457 
1458 
1459 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
1460   EXCEPTION_MARK;
1461   load_class_by_index(constant_pool, index, THREAD);
1462   if (HAS_PENDING_EXCEPTION) {
1463     // An exception happened during class loading. We ignore it here, since it
1464     // will be rethrown when the current activation is deoptimized and
1465     // the interpreter re-executes the bytecode.
1466     CLEAR_PENDING_EXCEPTION;
1467     // Class loading called Java code which may have caused a stack
1468     // overflow. If the exception was thrown right before the return
1469     // to the runtime, the stack is no longer guarded. Reguard the
1470     // stack; otherwise, if we return to the uncommon trap blob and the
1471     // stack bang causes a stack overflow, we crash.
1472     assert(THREAD->is_Java_thread(), "only a java thread can be here");
1473     JavaThread* thread = (JavaThread*)THREAD;
1474     bool guard_pages_enabled = thread->stack_guards_enabled();
1475     if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1476     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1477   }
1478 }
1479 
1480 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1481   HandleMark hm;
1482 
1483   // uncommon_trap() is called at the beginning of the uncommon trap
1484   // handler. Note this fact before we start generating temporary frames
1485   // that can confuse an asynchronous stack walker. This counter is
1486   // decremented at the end of unpack_frames().
1487   thread->inc_in_deopt_handler();
1488 
1489   // We need to update the map if we have biased locking.
1490 #if INCLUDE_JVMCI
1491   // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1492   RegisterMap reg_map(thread, true);
1493 #else
1494   RegisterMap reg_map(thread, UseBiasedLocking);
1495 #endif
1496   frame stub_frame = thread->last_frame();
1497   frame fr = stub_frame.sender(&reg_map);
1498   // Make sure the calling nmethod is not getting deoptimized and removed
1499   // before we are done with it.
1500   nmethodLocker nl(fr.pc());
1501 
1502   // Log a message
1503   Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1504               trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1505 
1506   {
1507     ResourceMark rm;
1508 
1509     // Revoke biases of any monitors in the frame to ensure we can migrate them
1510     revoke_biases_of_monitors(thread, fr, &reg_map);
1511 
1512     DeoptReason reason = trap_request_reason(trap_request);
1513     DeoptAction action = trap_request_action(trap_request);
1514 #if INCLUDE_JVMCI
1515     int debug_id = trap_request_debug_id(trap_request);
1516 #endif
1517     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1518 
1519     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1520     compiledVFrame* cvf = compiledVFrame::cast(vf);
1521 
1522     CompiledMethod* nm = cvf->code();
1523 
1524     ScopeDesc*      trap_scope  = cvf->scope();
1525 
1526     if (TraceDeoptimization) {
1527       ttyLocker ttyl;
1528       tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1529 #if INCLUDE_JVMCI
1530           , debug_id
1531 #endif
1532           );
1533     }
1534 
1535     methodHandle    trap_method = trap_scope->method();
1536     int             trap_bci    = trap_scope->bci();
1537 #if INCLUDE_JVMCI
1538     oop speculation = thread->pending_failed_speculation();
1539     if (nm->is_compiled_by_jvmci()) {
1540       if (speculation != NULL) {
1541         oop speculation_log = nm->as_nmethod()->speculation_log();
1542         if (speculation_log != NULL) {
1543           if (TraceDeoptimization || TraceUncollectedSpeculations) {
1544             if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
1545               tty->print_cr("A speculation that was not collected by the compiler is being overwritten");
1546             }
1547           }
1548           if (TraceDeoptimization) {
1549             tty->print_cr("Saving speculation to speculation log");
1550           }
1551           HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation);
1552         } else {
1553           if (TraceDeoptimization) {
1554             tty->print_cr("Speculation present but no speculation log");
1555           }
1556         }
1557         thread->set_pending_failed_speculation(NULL);
1558       } else {
1559         if (TraceDeoptimization) {
1560           tty->print_cr("No speculation");
1561         }
1562       }
1563     } else {
1564       assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1565     }
1566 
1567     if (trap_bci == SynchronizationEntryBCI) {
1568       trap_bci = 0;
1569       thread->set_pending_monitorenter(true);
1570     }
1571 
1572     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1573       thread->set_pending_transfer_to_interpreter(true);
1574     }
1575 #endif
1576 
1577     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1578     // Record this event in the histogram.
1579     gather_statistics(reason, action, trap_bc);
1580 
1581     // Ensure that we can record deopt. history:
1582     // Need MDO to record RTM code generation state.
1583     bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1584 
1585     methodHandle profiled_method;
1586 #if INCLUDE_JVMCI
1587     if (nm->is_compiled_by_jvmci()) {
1588       profiled_method = nm->method();
1589     } else {
1590       profiled_method = trap_method;
1591     }
1592 #else
1593     profiled_method = trap_method;
1594 #endif
1595 
1596     MethodData* trap_mdo =
1597       get_method_data(thread, profiled_method, create_if_missing);
1598 
1599     // Log a message
1600     Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
1601                               trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
1602                               trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
1603 
1604     // Print a bunch of diagnostics, if requested.
1605     if (TraceDeoptimization || LogCompilation) {
1606       ResourceMark rm;
1607       ttyLocker ttyl;
1608       char buf[100];
1609       if (xtty != NULL) {
1610         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
1611                          os::current_thread_id(),
1612                          format_trap_request(buf, sizeof(buf), trap_request));
1613         nm->log_identity(xtty);
1614       }
1615       Symbol* class_name = NULL;
1616       bool unresolved = false;
1617       if (unloaded_class_index >= 0) {
1618         constantPoolHandle constants (THREAD, trap_method->constants());
1619         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1620           class_name = constants->klass_name_at(unloaded_class_index);
1621           unresolved = true;
1622           if (xtty != NULL)
1623             xtty->print(" unresolved='1'");
1624         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1625           class_name = constants->symbol_at(unloaded_class_index);
1626         }
1627         if (xtty != NULL)
1628           xtty->name(class_name);
1629       }
1630       if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
1631         // Dump the relevant MDO state.
1632         // This is the deopt count for the current reason, any previous
1633         // reasons or recompiles seen at this point.
1634         int dcnt = trap_mdo->trap_count(reason);
1635         if (dcnt != 0)
1636           xtty->print(" count='%d'", dcnt);
1637         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1638         int dos = (pdata == NULL)? 0: pdata->trap_state();
1639         if (dos != 0) {
1640           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1641           if (trap_state_is_recompiled(dos)) {
1642             int recnt2 = trap_mdo->overflow_recompile_count();
1643             if (recnt2 != 0)
1644               xtty->print(" recompiles2='%d'", recnt2);
1645           }
1646         }
1647       }
1648       if (xtty != NULL) {
1649         xtty->stamp();
1650         xtty->end_head();
1651       }
1652       if (TraceDeoptimization) {  // make noise on the tty
1653         tty->print("Uncommon trap occurred in");
1654         nm->method()->print_short_name(tty);
1655         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1656 #if INCLUDE_JVMCI
1657         if (nm->is_nmethod()) {
1658           oop installedCode = nm->as_nmethod()->jvmci_installed_code();
1659           if (installedCode != NULL) {
1660             oop installedCodeName = NULL;
1661             if (installedCode->is_a(InstalledCode::klass())) {
1662               installedCodeName = InstalledCode::name(installedCode);
1663             }
1664             if (installedCodeName != NULL) {
1665               tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName));
1666             } else {
1667               tty->print(" (JVMCI: installed code has no name) ");
1668             }
1669           } else if (nm->is_compiled_by_jvmci()) {
1670             tty->print(" (JVMCI: no installed code) ");
1671           }
1672         }
1673 #endif
1674         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1675                    p2i(fr.pc()),
1676                    os::current_thread_id(),
1677                    trap_reason_name(reason),
1678                    trap_action_name(action),
1679                    unloaded_class_index
1680 #if INCLUDE_JVMCI
1681                    , debug_id
1682 #endif
1683                    );
1684         if (class_name != NULL) {
1685           tty->print(unresolved ? " unresolved class: " : " symbol: ");
1686           class_name->print_symbol_on(tty);
1687         }
1688         tty->cr();
1689       }
1690       if (xtty != NULL) {
1691         // Log the precise location of the trap.
1692         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1693           xtty->begin_elem("jvms bci='%d'", sd->bci());
1694           xtty->method(sd->method());
1695           xtty->end_elem();
1696           if (sd->is_top())  break;
1697         }
1698         xtty->tail("uncommon_trap");
1699       }
1700     }
1701     // (End diagnostic printout.)
1702 
1703     // Load class if necessary
1704     if (unloaded_class_index >= 0) {
1705       constantPoolHandle constants(THREAD, trap_method->constants());
1706       load_class_by_index(constants, unloaded_class_index);
1707     }
1708 
1709     // Flush the nmethod if necessary and desirable.
1710     //
1711     // We need to avoid situations where we are re-flushing the nmethod
1712     // because of a hot deoptimization site.  Repeated flushes at the same
1713     // point need to be detected by the compiler and avoided.  If the compiler
1714     // cannot avoid them (or has a bug and "refuses" to avoid them), this
1715     // module must take measures to avoid an infinite cycle of recompilation
1716     // and deoptimization.  There are several such measures:
1717     //
1718     //   1. If a recompilation is ordered a second time at some site X
1719     //   and for the same reason R, the action is adjusted to 'reinterpret',
1720     //   to give the interpreter time to exercise the method more thoroughly.
1721     //   If this happens, the method's overflow_recompile_count is incremented.
1722     //
1723     //   2. If the compiler fails to reduce the deoptimization rate, then
1724     //   the method's overflow_recompile_count will begin to exceed the set
1725     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
1726     //   is adjusted to 'make_not_compilable', and the method is abandoned
1727     //   to the interpreter.  This is a performance hit for hot methods,
1728     //   but is better than a disastrous infinite cycle of recompilations.
1729     //   (Actually, only the method containing the site X is abandoned.)
1730     //
1731     //   3. In parallel with the previous measures, if the total number of
1732     //   recompilations of a method exceeds the much larger set limit
1733     //   PerMethodRecompilationCutoff, the method is abandoned.
1734     //   This should only happen if the method is very large and has
1735     //   many "lukewarm" deoptimizations.  The code which enforces this
1736     //   limit is elsewhere (class nmethod, class Method).
1737     //
1738     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1739     // to recompile at each bytecode independently of the per-BCI cutoff.
1740     //
1741     // The decision to update code is up to the compiler, and is encoded
1742     // in the Action_xxx code.  If the compiler requests Action_none
1743     // no trap state is changed, no compiled code is changed, and the
1744     // computation suffers along in the interpreter.
1745     //
1746     // The other action codes specify various tactics for decompilation
1747     // and recompilation.  Action_maybe_recompile is the loosest, and
1748     // allows the compiled code to stay around until enough traps are seen,
1749     // and until the compiler gets around to recompiling the trapping method.
1750     //
1751     // The other actions cause immediate removal of the present code.
1752 
1753     // Traps caused by injected profile shouldn't pollute trap counts.
1754     bool injected_profile_trap = trap_method->has_injected_profile() &&
1755                                  (reason == Reason_intrinsic || reason == Reason_unreached);
1756 
1757     bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1758     bool make_not_entrant = false;
1759     bool make_not_compilable = false;
1760     bool reprofile = false;
1761     switch (action) {
1762     case Action_none:
1763       // Keep the old code.
1764       update_trap_state = false;
1765       break;
1766     case Action_maybe_recompile:
1767       // We do not need to invalidate the present code, but we can
1768       // initiate another compilation.
1769       // Start the compiler without (necessarily) invalidating the nmethod.
1770       // The system will tolerate the old code, but new code should be
1771       // generated when possible.
1772       break;
1773     case Action_reinterpret:
1774       // Go back into the interpreter for a while, and then consider
1775       // recompiling from scratch.
1776       make_not_entrant = true;
1777       // Reset invocation counter for outer most method.
1778       // This will allow the interpreter to exercise the bytecodes
1779       // for a while before recompiling.
1780       // By contrast, Action_make_not_entrant is immediate.
1781       //
1782       // Note that the compiler will track null_check, null_assert,
1783       // range_check, and class_check events and log them as if they
1784       // had been traps taken from compiled code.  This will update
1785       // the MDO trap history so that the next compilation will
1786       // properly detect hot trap sites.
1787       reprofile = true;
1788       break;
1789     case Action_make_not_entrant:
1790       // Request immediate recompilation, and get rid of the old code.
1791       // Make them not entrant, so next time they are called they get
1792       // recompiled.  Unloaded classes are loaded now so recompile before next
1793       // time they are called.  Same for uninitialized.  The interpreter will
1794       // link the missing class, if any.
1795       make_not_entrant = true;
1796       break;
1797     case Action_make_not_compilable:
1798       // Give up on compiling this method at all.
1799       make_not_entrant = true;
1800       make_not_compilable = true;
1801       break;
1802     default:
1803       ShouldNotReachHere();
1804     }
1805 
1806     // Setting +ProfileTraps fixes the following, on all platforms:
1807     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
1808     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1809     // recompile relies on a MethodData* to record heroic opt failures.
1810 
1811     // Whether the interpreter is producing MDO data or not, we also need
1812     // to use the MDO to detect hot deoptimization points and control
1813     // aggressive optimization.
1814     bool inc_recompile_count = false;
1815     ProfileData* pdata = NULL;
1816     if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
1817       assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
1818       uint this_trap_count = 0;
1819       bool maybe_prior_trap = false;
1820       bool maybe_prior_recompile = false;
1821       pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
1822 #if INCLUDE_JVMCI
1823                                    nm->is_compiled_by_jvmci() && nm->is_osr_method(),
1824 #endif
1825                                    nm->method(),
1826                                    //outputs:
1827                                    this_trap_count,
1828                                    maybe_prior_trap,
1829                                    maybe_prior_recompile);
1830       // Because the interpreter also counts null, div0, range, and class
1831       // checks, these traps from compiled code are double-counted.
1832       // This is harmless; it just means that the PerXTrapLimit values
1833       // are in effect a little smaller than they look.
1834 
1835       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1836       if (per_bc_reason != Reason_none) {
1837         // Now take action based on the partially known per-BCI history.
1838         if (maybe_prior_trap
1839             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1840           // If there are too many traps at this BCI, force a recompile.
1841           // This will allow the compiler to see the limit overflow, and
1842           // take corrective action, if possible.  The compiler generally
1843           // does not use the exact PerBytecodeTrapLimit value, but instead
1844           // changes its tactics if it sees any traps at all.  This provides
1845           // a little hysteresis, delaying a recompile until a trap happens
1846           // several times.
1847           //
1848           // Actually, since there is only one bit of counter per BCI,
1849           // the possible per-BCI counts are {0,1,(per-method count)}.
1850           // This produces accurate results if in fact there is only
1851           // one hot trap site, but begins to get fuzzy if there are
1852           // many sites.  For example, if there are ten sites each
1853           // trapping two or more times, they each get the blame for
1854           // all of their traps.
1855           make_not_entrant = true;
1856         }
1857 
1858         // Detect repeated recompilation at the same BCI, and enforce a limit.
1859         if (make_not_entrant && maybe_prior_recompile) {
1860           // More than one recompile at this point.
1861           inc_recompile_count = maybe_prior_trap;
1862         }
1863       } else {
1864         // For reasons which are not recorded per-bytecode, we simply
1865         // force recompiles unconditionally.
1866         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
1867         make_not_entrant = true;
1868       }
1869 
1870       // Go back to the compiler if there are too many traps in this method.
1871       if (this_trap_count >= per_method_trap_limit(reason)) {
1872         // If there are too many traps in this method, force a recompile.
1873         // This will allow the compiler to see the limit overflow, and
1874         // take corrective action, if possible.
1875         // (This condition is an unlikely backstop only, because the
1876         // PerBytecodeTrapLimit is more likely to take effect first,
1877         // if it is applicable.)
1878         make_not_entrant = true;
1879       }
1880 
1881       // Here's more hysteresis:  If there has been a recompile at
1882       // this trap point already, run the method in the interpreter
1883       // for a while to exercise it more thoroughly.
1884       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
1885         reprofile = true;
1886       }
1887     }
1888 
1889     // Take requested actions on the method:
1890 
1891     // Recompile
1892     if (make_not_entrant) {
1893       if (!nm->make_not_entrant()) {
1894         return; // the call did not change nmethod's state
1895       }
1896 
1897       if (pdata != NULL) {
1898         // Record the recompilation event, if any.
1899         int tstate0 = pdata->trap_state();
1900         int tstate1 = trap_state_set_recompiled(tstate0, true);
1901         if (tstate1 != tstate0)
1902           pdata->set_trap_state(tstate1);
1903       }
1904 
1905 #if INCLUDE_RTM_OPT
1906       // Restart collecting RTM locking abort statistics if the method
1907       // is recompiled for a reason other than RTM state change.
1908       // Assume that in new recompiled code the statistic could be different,
1909       // for example, due to different inlining.
1910       if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
1911           UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
1912         trap_mdo->atomic_set_rtm_state(ProfileRTM);
1913       }
1914 #endif
1915       // For code aging we count traps separately here, using make_not_entrant()
1916       // as a guard against simultaneous deopts in multiple threads.
1917       if (reason == Reason_tenured && trap_mdo != NULL) {
1918         trap_mdo->inc_tenure_traps();
1919       }
1920     }
1921 
1922     if (inc_recompile_count) {
1923       trap_mdo->inc_overflow_recompile_count();
1924       if ((uint)trap_mdo->overflow_recompile_count() >
1925           (uint)PerBytecodeRecompilationCutoff) {
1926         // Give up on the method containing the bad BCI.
1927         if (trap_method() == nm->method()) {
1928           make_not_compilable = true;
1929         } else {
1930           trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
1931           // But give grace to the enclosing nm->method().
1932         }
1933       }
1934     }
1935 
1936     // Reprofile
1937     if (reprofile) {
1938       CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
1939     }
1940 
1941     // Give up compiling
1942     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
1943       assert(make_not_entrant, "consistent");
1944       nm->method()->set_not_compilable(CompLevel_full_optimization);
1945     }
1946 
1947   } // Free marked resources
1948 
1949 }
1950 JRT_END
1951 
1952 ProfileData*
1953 Deoptimization::query_update_method_data(MethodData* trap_mdo,
1954                                          int trap_bci,
1955                                          Deoptimization::DeoptReason reason,
1956                                          bool update_total_trap_count,
1957 #if INCLUDE_JVMCI
1958                                          bool is_osr,
1959 #endif
1960                                          Method* compiled_method,
1961                                          //outputs:
1962                                          uint& ret_this_trap_count,
1963                                          bool& ret_maybe_prior_trap,
1964                                          bool& ret_maybe_prior_recompile) {
1965   bool maybe_prior_trap = false;
1966   bool maybe_prior_recompile = false;
1967   uint this_trap_count = 0;
1968   if (update_total_trap_count) {
1969     uint idx = reason;
1970 #if INCLUDE_JVMCI
1971     if (is_osr) {
1972       idx += Reason_LIMIT;
1973     }
1974 #endif
1975     uint prior_trap_count = trap_mdo->trap_count(idx);
1976     this_trap_count  = trap_mdo->inc_trap_count(idx);
1977 
1978     // If the runtime cannot find a place to store trap history,
1979     // it is estimated based on the general condition of the method.
1980     // If the method has ever been recompiled, or has ever incurred
1981     // a trap with the present reason, then this BCI is assumed
1982     // (pessimistically) to be the culprit.
1983     maybe_prior_trap      = (prior_trap_count != 0);
1984     maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
1985   }
1986   ProfileData* pdata = NULL;
1987 
1988 
1989   // For reasons which are recorded per bytecode, we check per-BCI data.
1990   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1991   assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
1992   if (per_bc_reason != Reason_none) {
1993     // Find the profile data for this BCI.  If there isn't one,
1994     // try to allocate one from the MDO's set of spares.
1995     // This will let us detect a repeated trap at this point.
1996     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
1997 
1998     if (pdata != NULL) {
1999       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2000         if (LogCompilation && xtty != NULL) {
2001           ttyLocker ttyl;
2002           // no more room for speculative traps in this MDO
2003           xtty->elem("speculative_traps_oom");
2004         }
2005       }
2006       // Query the trap state of this profile datum.
2007       int tstate0 = pdata->trap_state();
2008       if (!trap_state_has_reason(tstate0, per_bc_reason))
2009         maybe_prior_trap = false;
2010       if (!trap_state_is_recompiled(tstate0))
2011         maybe_prior_recompile = false;
2012 
2013       // Update the trap state of this profile datum.
2014       int tstate1 = tstate0;
2015       // Record the reason.
2016       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2017       // Store the updated state on the MDO, for next time.
2018       if (tstate1 != tstate0)
2019         pdata->set_trap_state(tstate1);
2020     } else {
2021       if (LogCompilation && xtty != NULL) {
2022         ttyLocker ttyl;
2023         // Missing MDP?  Leave a small complaint in the log.
2024         xtty->elem("missing_mdp bci='%d'", trap_bci);
2025       }
2026     }
2027   }
2028 
2029   // Return results:
2030   ret_this_trap_count = this_trap_count;
2031   ret_maybe_prior_trap = maybe_prior_trap;
2032   ret_maybe_prior_recompile = maybe_prior_recompile;
2033   return pdata;
2034 }
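// query_update_method_data() is shared by the compiled-code path in
// uncommon_trap_inner() above and the interpreter path in
// update_method_data_from_interpreter() below; only the former supplies a
// compiled_method, which is used when recording speculative traps.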
2035 
2036 void
2037 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2038   ResourceMark rm;
2039   // Ignored outputs:
2040   uint ignore_this_trap_count;
2041   bool ignore_maybe_prior_trap;
2042   bool ignore_maybe_prior_recompile;
2043   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2044   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2045   bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true);
2046   query_update_method_data(trap_mdo, trap_bci,
2047                            (DeoptReason)reason,
2048                            update_total_counts,
2049 #if INCLUDE_JVMCI
2050                            false,
2051 #endif
2052                            NULL,
2053                            ignore_this_trap_count,
2054                            ignore_maybe_prior_trap,
2055                            ignore_maybe_prior_recompile);
2056 }
2057 
2058 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
2059   if (TraceDeoptimization) {
2060     tty->print("Uncommon trap ");
2061   }
2062   // Still in Java; no safepoints yet
2063   {
2064     // This enters VM and may safepoint
2065     uncommon_trap_inner(thread, trap_request);
2066   }
2067   return fetch_unroll_info_helper(thread, exec_mode);
2068 }
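// uncommon_trap() is reached from generated code (the uncommon trap blob):
// uncommon_trap_inner() enters the VM, updates profiling state and may make
// the nmethod not entrant or not compilable, and fetch_unroll_info_helper()
// then builds the UnrollBlock describing the interpreter frames that will
// replace the compiled frame.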
2069 
2070 // Local derived constants.
2071 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2072 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
2073 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
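// A worked example of the encoding (illustrative; uses only the helpers
// defined below):
//   int ts = 0;                                        // nothing recorded yet
//   ts = trap_state_add_reason(ts, Reason_null_check); // record one reason
//   ts = trap_state_set_recompiled(ts, true);          // note a recompile
//   // format_trap_state() now renders ts as "null_check recompiled".
//   // Adding a second, different reason collapses the reason bits to
//   // DS_REASON_MASK, which trap_state_reason() decodes as Reason_many.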
2074 
2075 //---------------------------trap_state_reason---------------------------------
2076 Deoptimization::DeoptReason
2077 Deoptimization::trap_state_reason(int trap_state) {
2078   // This assert provides the link between the width of DataLayout::trap_bits
2079   // and the encoding of "recorded" reasons.  It ensures there are enough
2080   // bits to store all needed reasons in the per-BCI MDO profile.
2081   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2082   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2083   trap_state -= recompile_bit;
2084   if (trap_state == DS_REASON_MASK) {
2085     return Reason_many;
2086   } else {
2087     assert((int)Reason_none == 0, "state=0 => Reason_none");
2088     return (DeoptReason)trap_state;
2089   }
2090 }
2091 //-------------------------trap_state_has_reason-------------------------------
2092 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2093   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2094   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2095   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2096   trap_state -= recompile_bit;
2097   if (trap_state == DS_REASON_MASK) {
2098     return -1;  // true, unspecifically (bottom of state lattice)
2099   } else if (trap_state == reason) {
2100     return 1;   // true, definitely
2101   } else if (trap_state == 0) {
2102     return 0;   // false, definitely (top of state lattice)
2103   } else {
2104     return 0;   // false, definitely
2105   }
2106 }
2107 //-------------------------trap_state_add_reason-------------------------------
2108 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2109   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2110   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2111   trap_state -= recompile_bit;
2112   if (trap_state == DS_REASON_MASK) {
2113     return trap_state + recompile_bit;     // already at state lattice bottom
2114   } else if (trap_state == reason) {
2115     return trap_state + recompile_bit;     // the condition is already true
2116   } else if (trap_state == 0) {
2117     return reason + recompile_bit;          // no condition has yet been true
2118   } else {
2119     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
2120   }
2121 }
2122 //-----------------------trap_state_is_recompiled------------------------------
2123 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2124   return (trap_state & DS_RECOMPILE_BIT) != 0;
2125 }
2126 //-----------------------trap_state_set_recompiled-----------------------------
2127 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2128   if (z)  return trap_state |  DS_RECOMPILE_BIT;
2129   else    return trap_state & ~DS_RECOMPILE_BIT;
2130 }
2131 //---------------------------format_trap_state---------------------------------
2132 // This is used for debugging and diagnostics, including LogFile output.
2133 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2134                                               int trap_state) {
2135   assert(buflen > 0, "sanity");
2136   DeoptReason reason      = trap_state_reason(trap_state);
2137   bool        recomp_flag = trap_state_is_recompiled(trap_state);
2138   // Re-encode the state from its decoded components.
2139   int decoded_state = 0;
2140   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2141     decoded_state = trap_state_add_reason(decoded_state, reason);
2142   if (recomp_flag)
2143     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2144   // If the state re-encodes properly, format it symbolically.
2145   // Because this routine is used for debugging and diagnostics,
2146   // be robust even if the state is a strange value.
2147   size_t len;
2148   if (decoded_state != trap_state) {
2149     // Random buggy state that doesn't decode??
2150     len = jio_snprintf(buf, buflen, "#%d", trap_state);
2151   } else {
2152     len = jio_snprintf(buf, buflen, "%s%s",
2153                        trap_reason_name(reason),
2154                        recomp_flag ? " recompiled" : "");
2155   }
2156   return buf;
2157 }
2158 
2159 
2160 //--------------------------------statics--------------------------------------
2161 const char* Deoptimization::_trap_reason_name[] = {
2162   // Note:  Keep this in sync. with enum DeoptReason.
2163   "none",
2164   "null_check",
2165   "null_assert" JVMCI_ONLY("_or_unreached0"),
2166   "range_check",
2167   "class_check",
2168   "array_check",
2169   "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2170   "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2171   "unloaded",
2172   "uninitialized",
2173   "unreached",
2174   "unhandled",
2175   "constraint",
2176   "div0_check",
2177   "age",
2178   "predicate",
2179   "loop_limit_check",
2180   "speculate_class_check",
2181   "speculate_null_check",
2182   "rtm_state_change",
2183   "unstable_if",
2184   "unstable_fused_if",
2185 #if INCLUDE_JVMCI
2186   "aliasing",
2187   "transfer_to_interpreter",
2188   "not_compiled_exception_handler",
2189   "unresolved",
2190   "jsr_mismatch",
2191 #endif
2192   "tenured"
2193 };
2194 const char* Deoptimization::_trap_action_name[] = {
2195   // Note:  Keep this in sync. with enum DeoptAction.
2196   "none",
2197   "maybe_recompile",
2198   "reinterpret",
2199   "make_not_entrant",
2200   "make_not_compilable"
2201 };
2202 
2203 const char* Deoptimization::trap_reason_name(int reason) {
2204   // Check that every reason has a name
2205   STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2206 
2207   if (reason == Reason_many)  return "many";
2208   if ((uint)reason < Reason_LIMIT)
2209     return _trap_reason_name[reason];
2210   static char buf[20];
2211   sprintf(buf, "reason%d", reason);
2212   return buf;
2213 }
2214 const char* Deoptimization::trap_action_name(int action) {
2215   // Check that every action has a name
2216   STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2217 
2218   if ((uint)action < Action_LIMIT)
2219     return _trap_action_name[action];
2220   static char buf[20];
2221   sprintf(buf, "action%d", action);
2222   return buf;
2223 }
2224 
2225 // This is used for debugging and diagnostics, including LogFile output.
2226 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2227                                                 int trap_request) {
2228   jint unloaded_class_index = trap_request_index(trap_request);
2229   const char* reason = trap_reason_name(trap_request_reason(trap_request));
2230   const char* action = trap_action_name(trap_request_action(trap_request));
2231 #if INCLUDE_JVMCI
2232   int debug_id = trap_request_debug_id(trap_request);
2233 #endif
2234   size_t len;
2235   if (unloaded_class_index < 0) {
2236     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2237                        reason, action
2238 #if INCLUDE_JVMCI
2239                        ,debug_id
2240 #endif
2241                        );
2242   } else {
2243     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2244                        reason, action, unloaded_class_index
2245 #if INCLUDE_JVMCI
2246                        ,debug_id
2247 #endif
2248                        );
2249   }
2250   return buf;
2251 }
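// Example output (illustrative values): a trap_request encoding
// Reason_unloaded / Action_reinterpret with constant pool index 42 formats as
//   reason='unloaded' action='reinterpret' index='42'
// while requests carrying no index omit the index attribute; JVMCI builds
// additionally append a debug_id attribute.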
2252 
2253 juint Deoptimization::_deoptimization_hist
2254         [Deoptimization::Reason_LIMIT]
2255     [1 + Deoptimization::Action_LIMIT]
2256         [Deoptimization::BC_CASE_LIMIT]
2257   = {0};
2258 
2259 enum {
2260   LSB_BITS = 8,
2261   LSB_MASK = right_n_bits(LSB_BITS)
2262 };
2263 
2264 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2265                                        Bytecodes::Code bc) {
2266   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2267   assert(action >= 0 && action < Action_LIMIT, "oob");
2268   _deoptimization_hist[Reason_none][0][0] += 1;  // total
2269   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
2270   juint* cases = _deoptimization_hist[reason][1+action];
2271   juint* bc_counter_addr = NULL;
2272   juint  bc_counter      = 0;
2273   // Look for an unused counter, or an exact match to this BC.
2274   if (bc != Bytecodes::_illegal) {
2275     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2276       juint* counter_addr = &cases[bc_case];
2277       juint  counter = *counter_addr;
2278       if ((counter == 0 && bc_counter_addr == NULL)
2279           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2280         // this counter is either free or is already devoted to this BC
2281         bc_counter_addr = counter_addr;
2282         bc_counter = counter | bc;
2283       }
2284     }
2285   }
2286   if (bc_counter_addr == NULL) {
2287     // Overflow, or no given bytecode.
2288     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2289     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
2290   }
2291   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2292 }
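// Each cases[] counter packs the bytecode in its low LSB_BITS and the event
// count in the remaining bits; print_statistics() below unpacks it the same
// way:
//   Bytecodes::Code bc   = (Bytecodes::Code)(counter & LSB_MASK);
//   juint           hits = counter >> LSB_BITS;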
2293 
2294 jint Deoptimization::total_deoptimization_count() {
2295   return _deoptimization_hist[Reason_none][0][0];
2296 }
2297 
2298 jint Deoptimization::deoptimization_count(DeoptReason reason) {
2299   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2300   return _deoptimization_hist[reason][0][0];
2301 }
2302 
2303 void Deoptimization::print_statistics() {
2304   juint total = total_deoptimization_count();
2305   juint account = total;
2306   if (total != 0) {
2307     ttyLocker ttyl;
2308     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
2309     tty->print_cr("Deoptimization traps recorded:");
2310     #define PRINT_STAT_LINE(name, r) \
2311       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2312     PRINT_STAT_LINE("total", total);
2313     // For each non-zero entry in the histogram, print the reason,
2314     // the action, and (if specifically known) the type of bytecode.
2315     for (int reason = 0; reason < Reason_LIMIT; reason++) {
2316       for (int action = 0; action < Action_LIMIT; action++) {
2317         juint* cases = _deoptimization_hist[reason][1+action];
2318         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2319           juint counter = cases[bc_case];
2320           if (counter != 0) {
2321             char name[1*K];
2322             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2323             if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2324               bc = Bytecodes::_illegal;
2325             sprintf(name, "%s/%s/%s",
2326                     trap_reason_name(reason),
2327                     trap_action_name(action),
2328                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2329             juint r = counter >> LSB_BITS;
2330             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2331             account -= r;
2332           }
2333         }
2334       }
2335     }
2336     if (account != 0) {
2337       PRINT_STAT_LINE("unaccounted", account);
2338     }
2339     #undef PRINT_STAT_LINE
2340     if (xtty != NULL)  xtty->tail("statistics");
2341   }
2342 }
2343 #else // COMPILER2 || SHARK || INCLUDE_JVMCI
2344 
2345 
2346 // Stubs for a C1-only system.
2347 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2348   return false;
2349 }
2350 
2351 const char* Deoptimization::trap_reason_name(int reason) {
2352   return "unknown";
2353 }
2354 
2355 void Deoptimization::print_statistics() {
2356   // no output
2357 }
2358 
2359 void
2360 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2361   // no update
2362 }
2363 
2364 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2365   return 0;
2366 }
2367 
2368 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2369                                        Bytecodes::Code bc) {
2370   // no update
2371 }
2372 
2373 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2374                                               int trap_state) {
2375   jio_snprintf(buf, buflen, "#%d", trap_state);
2376   return buf;
2377 }
2378 
2379 #endif // COMPILER2 || SHARK || INCLUDE_JVMCI