1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "interpreter/bytecode.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "interpreter/oopMapCache.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/oopFactory.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/constantPool.hpp"
  42 #include "oops/method.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/fieldStreams.hpp"
  47 #include "oops/typeArrayOop.inline.hpp"
  48 #include "oops/valueArrayKlass.hpp"
  49 #include "oops/valueArrayOop.hpp"
  50 #include "oops/valueKlass.hpp"
  51 #include "oops/verifyOopClosure.hpp"
  52 #include "prims/jvmtiThreadState.hpp"
  53 #include "runtime/biasedLocking.hpp"
  54 #include "runtime/compilationPolicy.hpp"
  55 #include "runtime/deoptimization.hpp"
  56 #include "runtime/fieldDescriptor.hpp"
  57 #include "runtime/fieldDescriptor.inline.hpp"
  58 #include "runtime/frame.inline.hpp"
  59 #include "runtime/jniHandles.inline.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/interfaceSupport.inline.hpp"
  62 #include "runtime/safepointVerifiers.hpp"
  63 #include "runtime/sharedRuntime.hpp"
  64 #include "runtime/signature.hpp"
  65 #include "runtime/stubRoutines.hpp"
  66 #include "runtime/thread.hpp"
  67 #include "runtime/threadSMR.hpp"
  68 #include "runtime/vframe.hpp"
  69 #include "runtime/vframeArray.hpp"
  70 #include "runtime/vframe_hp.hpp"
  71 #include "utilities/events.hpp"
  72 #include "utilities/preserveException.hpp"
  73 #include "utilities/xmlstream.hpp"
  74 
  75 
  76 bool DeoptimizationMarker::_is_active = false;
  77 
  78 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
  79                                          int  caller_adjustment,
  80                                          int  caller_actual_parameters,
  81                                          int  number_of_frames,
  82                                          intptr_t* frame_sizes,
  83                                          address* frame_pcs,
  84                                          BasicType return_type,
  85                                          int exec_mode) {
  86   _size_of_deoptimized_frame = size_of_deoptimized_frame;
  87   _caller_adjustment         = caller_adjustment;
  88   _caller_actual_parameters  = caller_actual_parameters;
  89   _number_of_frames          = number_of_frames;
  90   _frame_sizes               = frame_sizes;
  91   _frame_pcs                 = frame_pcs;
  92   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  93   _return_type               = return_type;
  94   _initial_info              = 0;
  95   // PD (x86 only)
  96   _counter_temp              = 0;
  97   _unpack_kind               = exec_mode;
  98   _sender_sp_temp            = 0;
  99 
 100   _total_frame_sizes         = size_of_frames();
 101   assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
 102 }
 103 
 104 
 105 Deoptimization::UnrollBlock::~UnrollBlock() {
 106   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
 107   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
 108   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
 109 }
 110 
 111 
 112 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
 113   assert(register_number < RegisterMap::reg_count, "checking register number");
 114   return &_register_block[register_number * 2];
 115 }
 116 
 117 
 118 
 119 int Deoptimization::UnrollBlock::size_of_frames() const {
 120   // Acount first for the adjustment of the initial frame
 121   int result = _caller_adjustment;
 122   for (int index = 0; index < number_of_frames(); index++) {
 123     result += frame_sizes()[index];
 124   }
 125   return result;
 126 }
 127 
 128 
 129 void Deoptimization::UnrollBlock::print() {
 130   ttyLocker ttyl;
 131   tty->print_cr("UnrollBlock");
 132   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 133   tty->print(   "  frame_sizes: ");
 134   for (int index = 0; index < number_of_frames(); index++) {
 135     tty->print(INTX_FORMAT " ", frame_sizes()[index]);
 136   }
 137   tty->cr();
 138 }
 139 
 140 
 141 // In order to make fetch_unroll_info work properly with escape
 142 // analysis, The method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
 143 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 144 // of previously eliminated objects occurs in realloc_objects, which is
 145 // called from the method fetch_unroll_info_helper below.
 146 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
 147   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 148   // but makes the entry a little slower. There is however a little dance we have to
 149   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 150 
 151   // fetch_unroll_info() is called at the beginning of the deoptimization
 152   // handler. Note this fact before we start generating temporary frames
 153   // that can confuse an asynchronous stack walker. This counter is
 154   // decremented at the end of unpack_frames().
 155   if (TraceDeoptimization) {
 156     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 157   }
 158   thread->inc_in_deopt_handler();
 159 
 160   return fetch_unroll_info_helper(thread, exec_mode);
 161 JRT_END
 162 
 163 
 164 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 165 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
 166 
 167   // Note: there is a safepoint safety issue here. No matter whether we enter
 168   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 169   // the vframeArray is created.
 170   //
 171 
 172   // Allocate our special deoptimization ResourceMark
 173   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 174   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 175   thread->set_deopt_mark(dmark);
 176 
 177   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 178   RegisterMap map(thread, true);
 179   RegisterMap dummy_map(thread, false);
 180   // Now get the deoptee with a valid map
 181   frame deoptee = stub_frame.sender(&map);
 182   // Set the deoptee nmethod
 183   assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
 184   CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
 185   thread->set_deopt_compiled_method(cm);
 186 
 187   if (VerifyStack) {
 188     thread->validate_frame_layout();
 189   }
 190 
 191   // Create a growable array of VFrames where each VFrame represents an inlined
 192   // Java frame.  This storage is allocated with the usual system arena.
 193   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 194   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 195   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
 196   while (!vf->is_top()) {
 197     assert(vf->is_compiled_frame(), "Wrong frame type");
 198     chunk->push(compiledVFrame::cast(vf));
 199     vf = vf->sender();
 200   }
 201   assert(vf->is_compiled_frame(), "Wrong frame type");
 202   chunk->push(compiledVFrame::cast(vf));
 203 
 204   bool realloc_failures = false;
 205 
 206 #if COMPILER2_OR_JVMCI
 207   // Reallocate the non-escaping objects and restore their fields. Then
 208   // relock objects if synchronization on them was eliminated.
 209 #if !INCLUDE_JVMCI
 210   if (DoEscapeAnalysis || EliminateNestedLocks) {
 211     if (EliminateAllocations) {
 212 #endif // INCLUDE_JVMCI
 213       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 214       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 215 
 216       // The flag return_oop() indicates call sites which return oop
 217       // in compiled code. Such sites include java method calls,
 218       // runtime calls (for example, used to allocate new objects/arrays
 219       // on slow code path) and any other calls generated in compiled code.
 220       // It is not guaranteed that we can get such information here only
 221       // by analyzing bytecode in deoptimized frames. This is why this flag
 222       // is set during method compilation (see Compile::Process_OopMap_Node()).
 223       // If the previous frame was popped or if we are dispatching an exception,
 224       // we don't have an oop result.
 225       ScopeDesc* scope = chunk->at(0)->scope();
 226       bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
 227       // In case of the return of multiple values, we must take care
 228       // of all oop return values.
 229       GrowableArray<Handle> return_oops;
 230       ValueKlass* vk = NULL;
 231       if (save_oop_result && scope->return_vt()) {
 232         vk = ValueKlass::returned_value_klass(map);
 233         if (vk != NULL) {
 234           vk->save_oop_fields(map, return_oops);
 235           save_oop_result = false;
 236         }
 237       }
 238       if (save_oop_result) {
 239         // Reallocation may trigger GC. If deoptimization happened on return from
 240         // call which returns oop we need to save it since it is not in oopmap.
 241         oop result = deoptee.saved_oop_result(&map);
 242         assert(oopDesc::is_oop_or_null(result), "must be oop");
 243         return_oops.push(Handle(thread, result));
 244         assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 245         if (TraceDeoptimization) {
 246           ttyLocker ttyl;
 247           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 248         }
 249       }
 250       if (objects != NULL || vk != NULL) {
 251         bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
 252         JRT_BLOCK
 253           if (vk != NULL) {
 254             realloc_failures = realloc_value_type_result(vk, map, return_oops, THREAD);
 255           }
 256           if (objects != NULL) {
 257             realloc_failures = realloc_failures || realloc_objects(thread, &deoptee, &map, objects, THREAD);
 258             reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 259           }
 260         JRT_END
 261 #ifndef PRODUCT
 262         if (TraceDeoptimization) {
 263           ttyLocker ttyl;
 264           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 265           print_objects(objects, realloc_failures);
 266         }
 267 #endif
 268       }
 269       if (save_oop_result || vk != NULL) {
 270         // Restore result.
 271         assert(return_oops.length() == 1, "no value type");
 272         deoptee.set_saved_oop_result(&map, return_oops.pop()());
 273       }
 274 #if !INCLUDE_JVMCI
 275     }
 276     if (EliminateLocks) {
 277 #endif // INCLUDE_JVMCI
 278 #ifndef PRODUCT
 279       bool first = true;
 280 #endif
 281       for (int i = 0; i < chunk->length(); i++) {
 282         compiledVFrame* cvf = chunk->at(i);
 283         assert (cvf->scope() != NULL,"expect only compiled java frames");
 284         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 285         if (monitors->is_nonempty()) {
 286           relock_objects(monitors, thread, realloc_failures);
 287 #ifndef PRODUCT
 288           if (PrintDeoptimizationDetails) {
 289             ttyLocker ttyl;
 290             for (int j = 0; j < monitors->length(); j++) {
 291               MonitorInfo* mi = monitors->at(j);
 292               if (mi->eliminated()) {
 293                 if (first) {
 294                   first = false;
 295                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 296                 }
 297                 if (mi->owner_is_scalar_replaced()) {
 298                   Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
 299                   tty->print_cr("     failed reallocation for klass %s", k->external_name());
 300                 } else {
 301                   tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
 302                 }
 303               }
 304             }
 305           }
 306 #endif // !PRODUCT
 307         }
 308       }
 309 #if !INCLUDE_JVMCI
 310     }
 311   }
 312 #endif // INCLUDE_JVMCI
 313 #endif // COMPILER2_OR_JVMCI
 314 
 315   ScopeDesc* trap_scope = chunk->at(0)->scope();
 316   Handle exceptionObject;
 317   if (trap_scope->rethrow_exception()) {
 318     if (PrintDeoptimizationDetails) {
 319       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 320     }
 321     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 322     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 323     ScopeValue* topOfStack = expressions->top();
 324     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 325     guarantee(exceptionObject() != NULL, "exception oop can not be null");
 326   }
 327 
 328   // Ensure that no safepoint is taken after pointers have been stored
 329   // in fields of rematerialized objects.  If a safepoint occurs from here on
 330   // out the java state residing in the vframeArray will be missed.
 331   NoSafepointVerifier no_safepoint;
 332 
 333   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
 334 #if COMPILER2_OR_JVMCI
 335   if (realloc_failures) {
 336     pop_frames_failed_reallocs(thread, array);
 337   }
 338 #endif
 339 
 340   assert(thread->vframe_array_head() == NULL, "Pending deopt!");
 341   thread->set_vframe_array_head(array);
 342 
 343   // Now that the vframeArray has been created if we have any deferred local writes
 344   // added by jvmti then we can free up that structure as the data is now in the
 345   // vframeArray
 346 
 347   if (thread->deferred_locals() != NULL) {
 348     GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
 349     int i = 0;
 350     do {
 351       // Because of inlining we could have multiple vframes for a single frame
 352       // and several of the vframes could have deferred writes. Find them all.
 353       if (list->at(i)->id() == array->original().id()) {
 354         jvmtiDeferredLocalVariableSet* dlv = list->at(i);
 355         list->remove_at(i);
 356         // individual jvmtiDeferredLocalVariableSet are CHeapObj's
 357         delete dlv;
 358       } else {
 359         i++;
 360       }
 361     } while ( i < list->length() );
 362     if (list->length() == 0) {
 363       thread->set_deferred_locals(NULL);
 364       // free the list and elements back to C heap.
 365       delete list;
 366     }
 367 
 368   }
 369 
 370   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 371   CodeBlob* cb = stub_frame.cb();
 372   // Verify we have the right vframeArray
 373   assert(cb->frame_size() >= 0, "Unexpected frame size");
 374   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 375 
 376   // If the deopt call site is a MethodHandle invoke call site we have
 377   // to adjust the unpack_sp.
 378   nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
 379   if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
 380     unpack_sp = deoptee.unextended_sp();
 381 
 382 #ifdef ASSERT
 383   assert(cb->is_deoptimization_stub() ||
 384          cb->is_uncommon_trap_stub() ||
 385          strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
 386          strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
 387          "unexpected code blob: %s", cb->name());
 388 #endif
 389 
 390   // This is a guarantee instead of an assert because if vframe doesn't match
 391   // we will unpack the wrong deoptimized frame and wind up in strange places
 392   // where it will be very difficult to figure out what went wrong. Better
 393   // to die an early death here than some very obscure death later when the
 394   // trail is cold.
 395   // Note: on ia64 this guarantee can be fooled by frames with no memory stack
 396   // in that it will fail to detect a problem when there is one. This needs
 397   // more work in tiger timeframe.
 398   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
 399 
 400   int number_of_frames = array->frames();
 401 
 402   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
 403   // virtual activation, which is the reverse of the elements in the vframes array.
 404   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
 405   // +1 because we always have an interpreter return address for the final slot.
 406   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
 407   int popframe_extra_args = 0;
 408   // Create an interpreter return address for the stub to use as its return
 409   // address so the skeletal frames are perfectly walkable
 410   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
 411 
 412   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
 413   // activation be put back on the expression stack of the caller for reexecution
 414   if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
 415     popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
 416   }
 417 
 418   // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
 419   // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
 420   // than simply use array->sender.pc(). This requires us to walk the current set of frames
 421   //
 422   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 423   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 424 
 425   // It's possible that the number of parameters at the call site is
 426   // different than number of arguments in the callee when method
 427   // handles are used.  If the caller is interpreted get the real
 428   // value so that the proper amount of space can be added to it's
 429   // frame.
 430   bool caller_was_method_handle = false;
 431   if (deopt_sender.is_interpreted_frame()) {
 432     methodHandle method = deopt_sender.interpreter_frame_method();
 433     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
 434     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
 435       // Method handle invokes may involve fairly arbitrary chains of
 436       // calls so it's impossible to know how much actual space the
 437       // caller has for locals.
 438       caller_was_method_handle = true;
 439     }
 440   }
 441 
 442   //
 443   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 444   // frame_sizes/frame_pcs[1] next oldest frame (int)
 445   // frame_sizes/frame_pcs[n] youngest frame (int)
 446   //
 447   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
 448   // owns the space for the return address to it's caller).  Confusing ain't it.
 449   //
 450   // The vframe array can address vframes with indices running from
 451   // 0.._frames-1. Index  0 is the youngest frame and _frame - 1 is the oldest (root) frame.
 452   // When we create the skeletal frames we need the oldest frame to be in the zero slot
 453   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
 454   // so things look a little strange in this loop.
 455   //
 456   int callee_parameters = 0;
 457   int callee_locals = 0;
 458   for (int index = 0; index < array->frames(); index++ ) {
 459     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 460     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 461     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
 462     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
 463                                                                                                     callee_locals,
 464                                                                                                     index == 0,
 465                                                                                                     popframe_extra_args);
 466     // This pc doesn't have to be perfect just good enough to identify the frame
 467     // as interpreted so the skeleton frame will be walkable
 468     // The correct pc will be set when the skeleton frame is completely filled out
 469     // The final pc we store in the loop is wrong and will be overwritten below
 470     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
 471 
 472     callee_parameters = array->element(index)->method()->size_of_parameters();
 473     callee_locals = array->element(index)->method()->max_locals();
 474     popframe_extra_args = 0;
 475   }
 476 
 477   // Compute whether the root vframe returns a float or double value.
 478   BasicType return_type;
 479   {
 480     methodHandle method(thread, array->element(0)->method());
 481     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 482     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 483   }
 484 
 485   // Compute information for handling adapters and adjusting the frame size of the caller.
 486   int caller_adjustment = 0;
 487 
 488   // Compute the amount the oldest interpreter frame will have to adjust
 489   // its caller's stack by. If the caller is a compiled frame then
 490   // we pretend that the callee has no parameters so that the
 491   // extension counts for the full amount of locals and not just
 492   // locals-parms. This is because without a c2i adapter the parm
 493   // area as created by the compiled frame will not be usable by
 494   // the interpreter. (Depending on the calling convention there
 495   // may not even be enough space).
 496 
 497   // QQQ I'd rather see this pushed down into last_frame_adjust
 498   // and have it take the sender (aka caller).
 499 
 500   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 501     caller_adjustment = last_frame_adjust(0, callee_locals);
 502   } else if (callee_locals > callee_parameters) {
 503     // The caller frame may need extending to accommodate
 504     // non-parameter locals of the first unpacked interpreted frame.
 505     // Compute that adjustment.
 506     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 507   }
 508 
 509   // If the sender is deoptimized we must retrieve the address of the handler
 510   // since the frame will "magically" show the original pc before the deopt
 511   // and we'd undo the deopt.
 512 
 513   frame_pcs[0] = deopt_sender.raw_pc();
 514 
 515   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 516 
 517 #if INCLUDE_JVMCI
 518   if (exceptionObject() != NULL) {
 519     thread->set_exception_oop(exceptionObject());
 520     exec_mode = Unpack_exception;
 521   }
 522 #endif
 523 
 524   if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 525     assert(thread->has_pending_exception(), "should have thrown OOME");
 526     thread->set_exception_oop(thread->pending_exception());
 527     thread->clear_pending_exception();
 528     exec_mode = Unpack_exception;
 529   }
 530 
 531 #if INCLUDE_JVMCI
 532   if (thread->frames_to_pop_failed_realloc() > 0) {
 533     thread->set_pending_monitorenter(false);
 534   }
 535 #endif
 536 
 537   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 538                                       caller_adjustment * BytesPerWord,
 539                                       caller_was_method_handle ? 0 : callee_parameters,
 540                                       number_of_frames,
 541                                       frame_sizes,
 542                                       frame_pcs,
 543                                       return_type,
 544                                       exec_mode);
 545   // On some platforms, we need a way to pass some platform dependent
 546   // information to the unpacking code so the skeletal frames come out
 547   // correct (initial fp value, unextended sp, ...)
 548   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 549 
 550   if (array->frames() > 1) {
 551     if (VerifyStack && TraceDeoptimization) {
 552       ttyLocker ttyl;
 553       tty->print_cr("Deoptimizing method containing inlining");
 554     }
 555   }
 556 
 557   array->set_unroll_block(info);
 558   return info;
 559 }
 560 
 561 // Called to cleanup deoptimization data structures in normal case
 562 // after unpacking to stack and when stack overflow error occurs
 563 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 564                                         vframeArray *array) {
 565 
 566   // Get array if coming from exception
 567   if (array == NULL) {
 568     array = thread->vframe_array_head();
 569   }
 570   thread->set_vframe_array_head(NULL);
 571 
 572   // Free the previous UnrollBlock
 573   vframeArray* old_array = thread->vframe_array_last();
 574   thread->set_vframe_array_last(array);
 575 
 576   if (old_array != NULL) {
 577     UnrollBlock* old_info = old_array->unroll_block();
 578     old_array->set_unroll_block(NULL);
 579     delete old_info;
 580     delete old_array;
 581   }
 582 
 583   // Deallocate any resource creating in this routine and any ResourceObjs allocated
 584   // inside the vframeArray (StackValueCollections)
 585 
 586   delete thread->deopt_mark();
 587   thread->set_deopt_mark(NULL);
 588   thread->set_deopt_compiled_method(NULL);
 589 
 590 
 591   if (JvmtiExport::can_pop_frame()) {
 592 #ifndef CC_INTERP
 593     // Regardless of whether we entered this routine with the pending
 594     // popframe condition bit set, we should always clear it now
 595     thread->clear_popframe_condition();
 596 #else
 597     // C++ interpreter will clear has_pending_popframe when it enters
 598     // with method_resume. For deopt_resume2 we clear it now.
 599     if (thread->popframe_forcing_deopt_reexecution())
 600         thread->clear_popframe_condition();
 601 #endif /* CC_INTERP */
 602   }
 603 
 604   // unpack_frames() is called at the end of the deoptimization handler
 605   // and (in C2) at the end of the uncommon trap handler. Note this fact
 606   // so that an asynchronous stack walker can work again. This counter is
 607   // incremented at the beginning of fetch_unroll_info() and (in C2) at
 608   // the beginning of uncommon_trap().
 609   thread->dec_in_deopt_handler();
 610 }
 611 
 612 // Moved from cpu directories because none of the cpus has callee save values.
 613 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
 614 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 615 
 616   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
 617   // the days we had adapter frames. When we deoptimize a situation where a
 618   // compiled caller calls a compiled caller will have registers it expects
 619   // to survive the call to the callee. If we deoptimize the callee the only
 620   // way we can restore these registers is to have the oldest interpreter
 621   // frame that we create restore these values. That is what this routine
 622   // will accomplish.
 623 
 624   // At the moment we have modified c2 to not have any callee save registers
 625   // so this problem does not exist and this routine is just a place holder.
 626 
 627   assert(f->is_interpreted_frame(), "must be interpreted");
 628 }
 629 
 630 // Return BasicType of value being returned
 631 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 632 
 633   // We are already active in the special DeoptResourceMark any ResourceObj's we
 634   // allocate will be freed at the end of the routine.
 635 
 636   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 637   // but makes the entry a little slower. There is however a little dance we have to
 638   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 639   ResetNoHandleMark rnhm; // No-op in release/product versions
 640   HandleMark hm;
 641 
 642   frame stub_frame = thread->last_frame();
 643 
 644   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 645   // must point to the vframeArray for the unpack frame.
 646   vframeArray* array = thread->vframe_array_head();
 647 
 648 #ifndef PRODUCT
 649   if (TraceDeoptimization) {
 650     ttyLocker ttyl;
 651     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 652                   p2i(thread), p2i(array), exec_mode);
 653   }
 654 #endif
 655   Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 656               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 657 
 658   UnrollBlock* info = array->unroll_block();
 659 
 660   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 661   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
 662 
 663   BasicType bt = info->return_type();
 664 
 665   // If we have an exception pending, claim that the return type is an oop
 666   // so the deopt_blob does not overwrite the exception_oop.
 667 
 668   if (exec_mode == Unpack_exception)
 669     bt = T_OBJECT;
 670 
 671   // Cleanup thread deopt data
 672   cleanup_deopt_info(thread, array);
 673 
 674 #ifndef PRODUCT
 675   if (VerifyStack) {
 676     ResourceMark res_mark;
 677     // Clear pending exception to not break verification code (restored afterwards)
 678     PRESERVE_EXCEPTION_MARK;
 679 
 680     thread->validate_frame_layout();
 681 
 682     // Verify that the just-unpacked frames match the interpreter's
 683     // notions of expression stack and locals
 684     vframeArray* cur_array = thread->vframe_array_last();
 685     RegisterMap rm(thread, false);
 686     rm.set_include_argument_oops(false);
 687     bool is_top_frame = true;
 688     int callee_size_of_parameters = 0;
 689     int callee_max_locals = 0;
 690     for (int i = 0; i < cur_array->frames(); i++) {
 691       vframeArrayElement* el = cur_array->element(i);
 692       frame* iframe = el->iframe();
 693       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
 694 
 695       // Get the oop map for this bci
 696       InterpreterOopMap mask;
 697       int cur_invoke_parameter_size = 0;
 698       bool try_next_mask = false;
 699       int next_mask_expression_stack_size = -1;
 700       int top_frame_expression_stack_adjustment = 0;
 701       methodHandle mh(thread, iframe->interpreter_frame_method());
 702       OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
 703       BytecodeStream str(mh, iframe->interpreter_frame_bci());
 704       int max_bci = mh->code_size();
 705       // Get to the next bytecode if possible
 706       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
 707       // Check to see if we can grab the number of outgoing arguments
 708       // at an uncommon trap for an invoke (where the compiler
 709       // generates debug info before the invoke has executed)
 710       Bytecodes::Code cur_code = str.next();
 711       if (Bytecodes::is_invoke(cur_code)) {
 712         Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
 713         cur_invoke_parameter_size = invoke.size_of_parameters();
 714         if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
 715           callee_size_of_parameters++;
 716         }
 717       }
 718       if (str.bci() < max_bci) {
 719         Bytecodes::Code next_code = str.next();
 720         if (next_code >= 0) {
 721           // The interpreter oop map generator reports results before
 722           // the current bytecode has executed except in the case of
 723           // calls. It seems to be hard to tell whether the compiler
 724           // has emitted debug information matching the "state before"
 725           // a given bytecode or the state after, so we try both
 726           if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
 727             // Get expression stack size for the next bytecode
 728             InterpreterOopMap next_mask;
 729             OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
 730             next_mask_expression_stack_size = next_mask.expression_stack_size();
 731             if (Bytecodes::is_invoke(next_code)) {
 732               Bytecode_invoke invoke(mh, str.bci());
 733               next_mask_expression_stack_size += invoke.size_of_parameters();
 734             }
 735             // Need to subtract off the size of the result type of
 736             // the bytecode because this is not described in the
 737             // debug info but returned to the interpreter in the TOS
 738             // caching register
 739             BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
 740             if (bytecode_result_type != T_ILLEGAL) {
 741               top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
 742             }
 743             assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive");
 744             try_next_mask = true;
 745           }
 746         }
 747       }
 748 
 749       // Verify stack depth and oops in frame
 750       // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
 751       if (!(
 752             /* SPARC */
 753             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
 754             /* x86 */
 755             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
 756             (try_next_mask &&
 757              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
 758                                                                     top_frame_expression_stack_adjustment))) ||
 759             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
 760             (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
 761              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
 762             )) {
 763         {
 764           ttyLocker ttyl;
 765 
 766           // Print out some information that will help us debug the problem
 767           tty->print_cr("Wrong number of expression stack elements during deoptimization");
 768           tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
 769           tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
 770                         iframe->interpreter_frame_expression_stack_size());
 771           tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
 772           tty->print_cr("  try_next_mask = %d", try_next_mask);
 773           tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
 774           tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
 775           tty->print_cr("  callee_max_locals = %d", callee_max_locals);
 776           tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
 777           tty->print_cr("  exec_mode = %d", exec_mode);
 778           tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
 779           tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
 780           tty->print_cr("  Interpreted frames:");
 781           for (int k = 0; k < cur_array->frames(); k++) {
 782             vframeArrayElement* el = cur_array->element(k);
 783             tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
 784           }
 785           cur_array->print_on_2(tty);
 786         } // release tty lock before calling guarantee
 787         guarantee(false, "wrong number of expression stack elements during deopt");
 788       }
 789       VerifyOopClosure verify;
 790       iframe->oops_interpreted_do(&verify, &rm, false);
 791       callee_size_of_parameters = mh->size_of_parameters();
 792       callee_max_locals = mh->max_locals();
 793       is_top_frame = false;
 794     }
 795   }
 796 #endif /* !PRODUCT */
 797 
 798 
 799   return bt;
 800 JRT_END
 801 
 802 
 803 int Deoptimization::deoptimize_dependents() {
 804   Threads::deoptimized_wrt_marked_nmethods();
 805   return 0;
 806 }
 807 
 808 Deoptimization::DeoptAction Deoptimization::_unloaded_action
 809   = Deoptimization::Action_reinterpret;
 810 
 811 
 812 
 813 #if INCLUDE_JVMCI || INCLUDE_AOT
 814 template<typename CacheType>
 815 class BoxCacheBase : public CHeapObj<mtCompiler> {
 816 protected:
 817   static InstanceKlass* find_cache_klass(Symbol* klass_name, TRAPS) {
 818     ResourceMark rm;
 819     char* klass_name_str = klass_name->as_C_string();
 820     Klass* k = SystemDictionary::find(klass_name, Handle(), Handle(), THREAD);
 821     guarantee(k != NULL, "%s must be loaded", klass_name_str);
 822     InstanceKlass* ik = InstanceKlass::cast(k);
 823     guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
 824     CacheType::compute_offsets(ik);
 825     return ik;
 826   }
 827 };
 828 
 829 template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache  : public BoxCacheBase<CacheType> {
 830   PrimitiveType _low;
 831   PrimitiveType _high;
 832   jobject _cache;
 833 protected:
 834   static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
 835   BoxCache(Thread* thread) {
 836     InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(CacheType::symbol(), thread);
 837     objArrayOop cache = CacheType::cache(ik);
 838     assert(cache->length() > 0, "Empty cache");
 839     _low = BoxType::value(cache->obj_at(0));
 840     _high = _low + cache->length() - 1;
 841     _cache = JNIHandles::make_global(Handle(thread, cache));
 842   }
 843   ~BoxCache() {
 844     JNIHandles::destroy_global(_cache);
 845   }
 846 public:
 847   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
 848     if (_singleton == NULL) {
 849       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
 850       if (!Atomic::replace_if_null(s, &_singleton)) {
 851         delete s;
 852       }
 853     }
 854     return _singleton;
 855   }
 856   oop lookup(PrimitiveType value) {
 857     if (_low <= value && value <= _high) {
 858       int offset = value - _low;
 859       return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
 860     }
 861     return NULL;
 862   }
 863   oop lookup_raw(intptr_t raw_value) {
 864     // Have to cast to avoid little/big-endian problems.
 865     if (sizeof(PrimitiveType) > sizeof(jint)) {
 866       jlong value = (jlong)raw_value;
 867       return lookup(value);
 868     }
 869     PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
 870     return lookup(value);
 871   }
 872 };
 873 
 874 typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
 875 typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
 876 typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
 877 typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
 878 typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;
 879 
 880 template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
 881 template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
 882 template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
 883 template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
 884 template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;
 885 
 886 class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
 887   jobject _true_cache;
 888   jobject _false_cache;
 889 protected:
 890   static BooleanBoxCache *_singleton;
 891   BooleanBoxCache(Thread *thread) {
 892     InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol(), thread);
 893     _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
 894     _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
 895   }
 896   ~BooleanBoxCache() {
 897     JNIHandles::destroy_global(_true_cache);
 898     JNIHandles::destroy_global(_false_cache);
 899   }
 900 public:
 901   static BooleanBoxCache* singleton(Thread* thread) {
 902     if (_singleton == NULL) {
 903       BooleanBoxCache* s = new BooleanBoxCache(thread);
 904       if (!Atomic::replace_if_null(s, &_singleton)) {
 905         delete s;
 906       }
 907     }
 908     return _singleton;
 909   }
 910   oop lookup_raw(intptr_t raw_value) {
 911     // Have to cast to avoid little/big-endian problems.
 912     jboolean value = (jboolean)*((jint*)&raw_value);
 913     return lookup(value);
 914   }
 915   oop lookup(jboolean value) {
 916     if (value != 0) {
 917       return JNIHandles::resolve_non_null(_true_cache);
 918     }
 919     return JNIHandles::resolve_non_null(_false_cache);
 920   }
 921 };
 922 
 923 BooleanBoxCache* BooleanBoxCache::_singleton = NULL;
 924 
 925 oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
 926    Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
 927    BasicType box_type = SystemDictionary::box_klass_type(k);
 928    if (box_type != T_OBJECT) {
 929      StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
 930      switch(box_type) {
 931        case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 932        case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 933        case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 934        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 935        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 936        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
 937        default:;
 938      }
 939    }
 940    return NULL;
 941 }
 942 #endif // INCLUDE_JVMCI || INCLUDE_AOT
 943 
 944 #if COMPILER2_OR_JVMCI
 945 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
 946   Handle pending_exception(THREAD, thread->pending_exception());
 947   const char* exception_file = thread->exception_file();
 948   int exception_line = thread->exception_line();
 949   thread->clear_pending_exception();
 950 
 951   bool failures = false;
 952 
 953   for (int i = 0; i < objects->length(); i++) {
 954     assert(objects->at(i)->is_object(), "invalid debug information");
 955     ObjectValue* sv = (ObjectValue*) objects->at(i);
 956 
 957     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 958     oop obj = NULL;
 959 
 960     if (k->is_instance_klass()) {
 961 #if INCLUDE_JVMCI || INCLUDE_AOT
 962       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
 963       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
 964         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
 965         obj = get_cached_box(abv, fr, reg_map, THREAD);
 966         if (obj != NULL) {
 967           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
 968           abv->set_cached(true);
 969         }
 970       }
 971 #endif // INCLUDE_JVMCI || INCLUDE_AOT
 972       InstanceKlass* ik = InstanceKlass::cast(k);
 973       if (obj == NULL) {
 974         obj = ik->allocate_instance(THREAD);
 975       }
 976     } else if (k->is_valueArray_klass()) {
 977       ValueArrayKlass* ak = ValueArrayKlass::cast(k);
 978       // Value type array must be zeroed because not all memory is reassigned
 979       obj = ak->allocate(sv->field_size(), THREAD);
 980     } else if (k->is_typeArray_klass()) {
 981       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
 982       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
 983       int len = sv->field_size() / type2size[ak->element_type()];
 984       obj = ak->allocate(len, THREAD);
 985     } else if (k->is_objArray_klass()) {
 986       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
 987       obj = ak->allocate(sv->field_size(), THREAD);
 988     }
 989 
 990     if (obj == NULL) {
 991       failures = true;
 992     }
 993 
 994     assert(sv->value().is_null(), "redundant reallocation");
 995     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
 996     CLEAR_PENDING_EXCEPTION;
 997     sv->set_value(obj);
 998   }
 999 
1000   if (failures) {
1001     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1002   } else if (pending_exception.not_null()) {
1003     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1004   }
1005 
1006   return failures;
1007 }
1008 
1009 // We're deoptimizing at the return of a call, value type fields are
1010 // in registers. When we go back to the interpreter, it will expect a
1011 // reference to a value type instance. Allocate and initialize it from
1012 // the register values here.
1013 bool Deoptimization::realloc_value_type_result(ValueKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1014   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1015   if (new_vt == NULL) {
1016     CLEAR_PENDING_EXCEPTION;
1017     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1018   }
1019   return_oops.clear();
1020   return_oops.push(Handle(THREAD, new_vt));
1021   return false;
1022 }
1023 
1024 // restore elements of an eliminated type array
1025 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
1026   int index = 0;
1027   intptr_t val;
1028 
1029   for (int i = 0; i < sv->field_size(); i++) {
1030     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1031     switch(type) {
1032     case T_LONG: case T_DOUBLE: {
1033       assert(value->type() == T_INT, "Agreement.");
1034       StackValue* low =
1035         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1036 #ifdef _LP64
1037       jlong res = (jlong)low->get_int();
1038 #else
1039 #ifdef SPARC
1040       // For SPARC we have to swap high and low words.
1041       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1042 #else
1043       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1044 #endif //SPARC
1045 #endif
1046       obj->long_at_put(index, res);
1047       break;
1048     }
1049 
1050     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1051     case T_INT: case T_FLOAT: { // 4 bytes.
1052       assert(value->type() == T_INT, "Agreement.");
1053       bool big_value = false;
1054       if (i + 1 < sv->field_size() && type == T_INT) {
1055         if (sv->field_at(i)->is_location()) {
1056           Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
1057           if (type == Location::dbl || type == Location::lng) {
1058             big_value = true;
1059           }
1060         } else if (sv->field_at(i)->is_constant_int()) {
1061           ScopeValue* next_scope_field = sv->field_at(i + 1);
1062           if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1063             big_value = true;
1064           }
1065         }
1066       }
1067 
1068       if (big_value) {
1069         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1070   #ifdef _LP64
1071         jlong res = (jlong)low->get_int();
1072   #else
1073   #ifdef SPARC
1074         // For SPARC we have to swap high and low words.
1075         jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1076   #else
1077         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1078   #endif //SPARC
1079   #endif
1080         obj->int_at_put(index, (jint)*((jint*)&res));
1081         obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
1082       } else {
1083         val = value->get_int();
1084         obj->int_at_put(index, (jint)*((jint*)&val));
1085       }
1086       break;
1087     }
1088 
1089     case T_SHORT:
1090       assert(value->type() == T_INT, "Agreement.");
1091       val = value->get_int();
1092       obj->short_at_put(index, (jshort)*((jint*)&val));
1093       break;
1094 
1095     case T_CHAR:
1096       assert(value->type() == T_INT, "Agreement.");
1097       val = value->get_int();
1098       obj->char_at_put(index, (jchar)*((jint*)&val));
1099       break;
1100 
1101     case T_BYTE:
1102       assert(value->type() == T_INT, "Agreement.");
1103       val = value->get_int();
1104       obj->byte_at_put(index, (jbyte)*((jint*)&val));
1105       break;
1106 
1107     case T_BOOLEAN:
1108       assert(value->type() == T_INT, "Agreement.");
1109       val = value->get_int();
1110       obj->bool_at_put(index, (jboolean)*((jint*)&val));
1111       break;
1112 
1113       default:
1114         ShouldNotReachHere();
1115     }
1116     index++;
1117   }
1118 }
1119 
1120 
1121 // restore fields of an eliminated object array
1122 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1123   for (int i = 0; i < sv->field_size(); i++) {
1124     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1125     assert(value->type() == T_OBJECT, "object element expected");
1126     obj->obj_at_put(i, value->get_obj()());
1127   }
1128 }
1129 
1130 class ReassignedField {
1131 public:
1132   int _offset;
1133   BasicType _type;
1134   InstanceKlass* _klass;
1135 public:
1136   ReassignedField() {
1137     _offset = 0;
1138     _type = T_ILLEGAL;
1139     _klass = NULL;
1140   }
1141 };
1142 
1143 int compare(ReassignedField* left, ReassignedField* right) {
1144   return left->_offset - right->_offset;
1145 }
1146 
1147 // Restore fields of an eliminated instance object using the same field order
1148 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1149 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1150   if (klass->superklass() != NULL) {
1151     svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal, 0, CHECK_0);
1152   }
1153 
1154   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1155   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1156     if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1157       ReassignedField field;
1158       field._offset = fs.offset();
1159       field._type = FieldType::basic_type(fs.signature());
1160       if (field._type == T_VALUETYPE) {
1161         field._type = T_OBJECT;
1162       }
1163       if (fs.is_flattened()) {
1164         // Resolve klass of flattened value type field
1165         Klass* vk = klass->get_value_field_klass(fs.index());
1166         field._klass = ValueKlass::cast(vk);
1167         field._type = T_VALUETYPE;
1168       }
1169       fields->append(field);
1170     }
1171   }
1172   fields->sort(compare);
1173   for (int i = 0; i < fields->length(); i++) {
1174     intptr_t val;
1175     ScopeValue* scope_field = sv->field_at(svIndex);
1176     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1177     int offset = base_offset + fields->at(i)._offset;
1178     BasicType type = fields->at(i)._type;
1179     switch (type) {
1180       case T_OBJECT:
1181       case T_ARRAY:
1182         assert(value->type() == T_OBJECT, "Agreement.");
1183         obj->obj_field_put(offset, value->get_obj()());
1184         break;
1185 
1186       case T_VALUETYPE: {
1187         // Recursively re-assign flattened value type fields
1188         InstanceKlass* vk = fields->at(i)._klass;
1189         assert(vk != NULL, "must be resolved");
1190         offset -= ValueKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1191         svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1192         continue; // Continue because we don't need to increment svIndex
1193       }
1194 
1195       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1196       case T_INT: case T_FLOAT: { // 4 bytes.
1197         assert(value->type() == T_INT, "Agreement.");
1198         bool big_value = false;
1199         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1200           if (scope_field->is_location()) {
1201             Location::Type type = ((LocationValue*) scope_field)->location().type();
1202             if (type == Location::dbl || type == Location::lng) {
1203               big_value = true;
1204             }
1205           }
1206           if (scope_field->is_constant_int()) {
1207             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1208             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1209               big_value = true;
1210             }
1211           }
1212         }
1213 
1214         if (big_value) {
1215           i++;
1216           assert(i < fields->length(), "second T_INT field needed");
1217           assert(fields->at(i)._type == T_INT, "T_INT field needed");
1218         } else {
1219           val = value->get_int();
1220           obj->int_field_put(offset, (jint)*((jint*)&val));
1221           break;
1222         }
1223       }
1224         /* no break */
1225 
1226       case T_LONG: case T_DOUBLE: {
1227         assert(value->type() == T_INT, "Agreement.");
1228         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
1229 #ifdef _LP64
1230         jlong res = (jlong)low->get_int();
1231 #else
1232 #ifdef SPARC
1233         // For SPARC we have to swap high and low words.
1234         jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1235 #else
1236         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1237 #endif //SPARC
1238 #endif
1239         obj->long_field_put(offset, res);
1240         break;
1241       }
1242 
1243       case T_SHORT:
1244         assert(value->type() == T_INT, "Agreement.");
1245         val = value->get_int();
1246         obj->short_field_put(offset, (jshort)*((jint*)&val));
1247         break;
1248 
1249       case T_CHAR:
1250         assert(value->type() == T_INT, "Agreement.");
1251         val = value->get_int();
1252         obj->char_field_put(offset, (jchar)*((jint*)&val));
1253         break;
1254 
1255       case T_BYTE:
1256         assert(value->type() == T_INT, "Agreement.");
1257         val = value->get_int();
1258         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1259         break;
1260 
1261       case T_BOOLEAN:
1262         assert(value->type() == T_INT, "Agreement.");
1263         val = value->get_int();
1264         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1265         break;
1266 
1267       default:
1268         ShouldNotReachHere();
1269     }
1270     svIndex++;
1271   }
1272   return svIndex;
1273 }
1274 
1275 // restore fields of an eliminated value type array
1276 void Deoptimization::reassign_value_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, valueArrayOop obj, ValueArrayKlass* vak, TRAPS) {
1277   ValueKlass* vk = vak->element_klass();
1278   assert(vk->flatten_array(), "should only be used for flattened value type arrays");
1279   // Adjust offset to omit oop header
1280   int base_offset = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE) - ValueKlass::cast(vk)->first_field_offset();
1281   // Initialize all elements of the flattened value type array
1282   for (int i = 0; i < sv->field_size(); i++) {
1283     ScopeValue* val = sv->field_at(i);
1284     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1285     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, false /* skip_internal */, offset, CHECK);
1286   }
1287 }
1288 
1289 // restore fields of all eliminated objects and arrays
1290 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1291   for (int i = 0; i < objects->length(); i++) {
1292     ObjectValue* sv = (ObjectValue*) objects->at(i);
1293     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1294     Handle obj = sv->value();
1295     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1296     if (PrintDeoptimizationDetails) {
1297       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1298     }
1299     if (obj.is_null()) {
1300       continue;
1301     }
1302 #if INCLUDE_JVMCI || INCLUDE_AOT
1303     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1304     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1305       continue;
1306     }
1307 #endif // INCLUDE_JVMCI || INCLUDE_AOT
1308     if (k->is_instance_klass()) {
1309       InstanceKlass* ik = InstanceKlass::cast(k);
1310       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1311     } else if (k->is_valueArray_klass()) {
1312       ValueArrayKlass* vak = ValueArrayKlass::cast(k);
1313       reassign_value_array_elements(fr, reg_map, sv, (valueArrayOop) obj(), vak, CHECK);
1314     } else if (k->is_typeArray_klass()) {
1315       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1316       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1317     } else if (k->is_objArray_klass()) {
1318       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1319     }
1320   }
1321 }
1322 
1323 
1324 // relock objects for which synchronization was eliminated
1325 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
1326   for (int i = 0; i < monitors->length(); i++) {
1327     MonitorInfo* mon_info = monitors->at(i);
1328     if (mon_info->eliminated()) {
1329       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1330       if (!mon_info->owner_is_scalar_replaced()) {
1331         Handle obj(thread, mon_info->owner());
1332         markWord mark = obj->mark();
1333         if (UseBiasedLocking && mark.has_bias_pattern()) {
1334           // New allocated objects may have the mark set to anonymously biased.
1335           // Also the deoptimized method may called methods with synchronization
1336           // where the thread-local object is bias locked to the current thread.
1337           assert(mark.is_biased_anonymously() ||
1338                  mark.biased_locker() == thread, "should be locked to current thread");
1339           // Reset mark word to unbiased prototype.
1340           markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
1341           obj->set_mark(unbiased_prototype);
1342         }
1343         BasicLock* lock = mon_info->lock();
1344         ObjectSynchronizer::enter(obj, lock, thread);
1345         assert(mon_info->owner()->is_locked(), "object must be locked now");
1346       }
1347     }
1348   }
1349 }
1350 
1351 
1352 #ifndef PRODUCT
1353 // print information about reallocated objects
1354 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
1355   fieldDescriptor fd;
1356 
1357   for (int i = 0; i < objects->length(); i++) {
1358     ObjectValue* sv = (ObjectValue*) objects->at(i);
1359     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1360     Handle obj = sv->value();
1361 
1362     tty->print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
1363     k->print_value();
1364     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1365     if (obj.is_null()) {
1366       tty->print(" allocation failed");
1367     } else {
1368       tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
1369     }
1370     tty->cr();
1371 
1372     if (Verbose && !obj.is_null()) {
1373       k->oop_print_on(obj(), tty);
1374     }
1375   }
1376 }
1377 #endif
1378 #endif // COMPILER2_OR_JVMCI
1379 
1380 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1381   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1382 
1383 #ifndef PRODUCT
1384   if (PrintDeoptimizationDetails) {
1385     ttyLocker ttyl;
1386     tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1387     fr.print_on(tty);
1388     tty->print_cr("     Virtual frames (innermost first):");
1389     for (int index = 0; index < chunk->length(); index++) {
1390       compiledVFrame* vf = chunk->at(index);
1391       tty->print("       %2d - ", index);
1392       vf->print_value();
1393       int bci = chunk->at(index)->raw_bci();
1394       const char* code_name;
1395       if (bci == SynchronizationEntryBCI) {
1396         code_name = "sync entry";
1397       } else {
1398         Bytecodes::Code code = vf->method()->code_at(bci);
1399         code_name = Bytecodes::name(code);
1400       }
1401       tty->print(" - %s", code_name);
1402       tty->print_cr(" @ bci %d ", bci);
1403       if (Verbose) {
1404         vf->print();
1405         tty->cr();
1406       }
1407     }
1408   }
1409 #endif
1410 
1411   // Register map for next frame (used for stack crawl).  We capture
1412   // the state of the deopt'ing frame's caller.  Thus if we need to
1413   // stuff a C2I adapter we can properly fill in the callee-save
1414   // register locations.
1415   frame caller = fr.sender(reg_map);
1416   int frame_size = caller.sp() - fr.sp();
1417 
1418   frame sender = caller;
1419 
1420   // Since the Java thread being deoptimized will eventually adjust it's own stack,
1421   // the vframeArray containing the unpacking information is allocated in the C heap.
1422   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1423   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1424 
1425   // Compare the vframeArray to the collected vframes
1426   assert(array->structural_compare(thread, chunk), "just checking");
1427 
1428 #ifndef PRODUCT
1429   if (PrintDeoptimizationDetails) {
1430     ttyLocker ttyl;
1431     tty->print_cr("     Created vframeArray " INTPTR_FORMAT, p2i(array));
1432   }
1433 #endif // PRODUCT
1434 
1435   return array;
1436 }
1437 
1438 #if COMPILER2_OR_JVMCI
1439 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1440   // Reallocation of some scalar replaced objects failed. Record
1441   // that we need to pop all the interpreter frames for the
1442   // deoptimized compiled frame.
1443   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1444   thread->set_frames_to_pop_failed_realloc(array->frames());
1445   // Unlock all monitors here otherwise the interpreter will see a
1446   // mix of locked and unlocked monitors (because of failed
1447   // reallocations of synchronized objects) and be confused.
1448   for (int i = 0; i < array->frames(); i++) {
1449     MonitorChunk* monitors = array->element(i)->monitors();
1450     if (monitors != NULL) {
1451       for (int j = 0; j < monitors->number_of_monitors(); j++) {
1452         BasicObjectLock* src = monitors->at(j);
1453         if (src->obj() != NULL) {
1454           ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
1455         }
1456       }
1457       array->element(i)->free_monitors(thread);
1458 #ifdef ASSERT
1459       array->element(i)->set_removed_monitors();
1460 #endif
1461     }
1462   }
1463 }
1464 #endif
1465 
1466 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
1467   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
1468   Thread* thread = Thread::current();
1469   for (int i = 0; i < monitors->length(); i++) {
1470     MonitorInfo* mon_info = monitors->at(i);
1471     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1472       objects_to_revoke->append(Handle(thread, mon_info->owner()));
1473     }
1474   }
1475 }
1476 
1477 
1478 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1479   if (!UseBiasedLocking) {
1480     return;
1481   }
1482 
1483   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1484 
1485   // Unfortunately we don't have a RegisterMap available in most of
1486   // the places we want to call this routine so we need to walk the
1487   // stack again to update the register map.
1488   if (map == NULL || !map->update_map()) {
1489     StackFrameStream sfs(thread, true);
1490     bool found = false;
1491     while (!found && !sfs.is_done()) {
1492       frame* cur = sfs.current();
1493       sfs.next();
1494       found = cur->id() == fr.id();
1495     }
1496     assert(found, "frame to be deoptimized not found on target thread's stack");
1497     map = sfs.register_map();
1498   }
1499 
1500   vframe* vf = vframe::new_vframe(&fr, map, thread);
1501   compiledVFrame* cvf = compiledVFrame::cast(vf);
1502   // Revoke monitors' biases in all scopes
1503   while (!cvf->is_top()) {
1504     collect_monitors(cvf, objects_to_revoke);
1505     cvf = compiledVFrame::cast(cvf->sender());
1506   }
1507   collect_monitors(cvf, objects_to_revoke);
1508 
1509   if (SafepointSynchronize::is_at_safepoint()) {
1510     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1511   } else {
1512     BiasedLocking::revoke(objects_to_revoke, thread);
1513   }
1514 }
1515 
1516 
1517 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1518   assert(fr.can_be_deoptimized(), "checking frame type");
1519 
1520   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1521 
1522   if (LogCompilation && xtty != NULL) {
1523     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1524     assert(cm != NULL, "only compiled methods can deopt");
1525 
1526     ttyLocker ttyl;
1527     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1528     cm->log_identity(xtty);
1529     xtty->end_head();
1530     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1531       xtty->begin_elem("jvms bci='%d'", sd->bci());
1532       xtty->method(sd->method());
1533       xtty->end_elem();
1534       if (sd->is_top())  break;
1535     }
1536     xtty->tail("deoptimized");
1537   }
1538 
1539   // Patch the compiled method so that when execution returns to it we will
1540   // deopt the execution state and return to the interpreter.
1541   fr.deoptimize(thread);
1542 }
1543 
1544 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1545   deoptimize(thread, fr, map, Reason_constraint);
1546 }
1547 
1548 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1549   // Deoptimize only if the frame comes from compile code.
1550   // Do not deoptimize the frame which is already patched
1551   // during the execution of the loops below.
1552   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1553     return;
1554   }
1555   ResourceMark rm;
1556   DeoptimizationMarker dm;
1557   if (UseBiasedLocking) {
1558     revoke_biases_of_monitors(thread, fr, map);
1559   }
1560   deoptimize_single_frame(thread, fr, reason);
1561 
1562 }
1563 
1564 #if INCLUDE_JVMCI
1565 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1566   // there is no exception handler for this pc => deoptimize
1567   cm->make_not_entrant();
1568 
1569   // Use Deoptimization::deoptimize for all of its side-effects:
1570   // revoking biases of monitors, gathering traps statistics, logging...
1571   // it also patches the return pc but we do not care about that
1572   // since we return a continuation to the deopt_blob below.
1573   JavaThread* thread = JavaThread::current();
1574   RegisterMap reg_map(thread, UseBiasedLocking);
1575   frame runtime_frame = thread->last_frame();
1576   frame caller_frame = runtime_frame.sender(&reg_map);
1577   assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
1578   Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
1579 
1580   MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
1581   if (trap_mdo != NULL) {
1582     trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
1583   }
1584 
1585   return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1586 }
1587 #endif
1588 
1589 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1590   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
1591          "can only deoptimize other thread at a safepoint");
1592   // Compute frame and register map based on thread and sp.
1593   RegisterMap reg_map(thread, UseBiasedLocking);
1594   frame fr = thread->last_frame();
1595   while (fr.id() != id) {
1596     fr = fr.sender(&reg_map);
1597   }
1598   deoptimize(thread, fr, &reg_map, reason);
1599 }
1600 
1601 
1602 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1603   if (thread == Thread::current()) {
1604     Deoptimization::deoptimize_frame_internal(thread, id, reason);
1605   } else {
1606     VM_DeoptimizeFrame deopt(thread, id, reason);
1607     VMThread::execute(&deopt);
1608   }
1609 }
1610 
1611 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1612   deoptimize_frame(thread, id, Reason_constraint);
1613 }
1614 
1615 // JVMTI PopFrame support
1616 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1617 {
1618   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1619 }
1620 JRT_END
1621 
1622 MethodData*
1623 Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
1624                                 bool create_if_missing) {
1625   Thread* THREAD = thread;
1626   MethodData* mdo = m()->method_data();
1627   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1628     // Build an MDO.  Ignore errors like OutOfMemory;
1629     // that simply means we won't have an MDO to update.
1630     Method::build_interpreter_method_data(m, THREAD);
1631     if (HAS_PENDING_EXCEPTION) {
1632       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1633       CLEAR_PENDING_EXCEPTION;
1634     }
1635     mdo = m()->method_data();
1636   }
1637   return mdo;
1638 }
1639 
1640 #if COMPILER2_OR_JVMCI
1641 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
1642   // in case of an unresolved klass entry, load the class.
1643   if (constant_pool->tag_at(index).is_unresolved_klass()) {
1644     Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
1645     return;
1646   }
1647 
1648   if (!constant_pool->tag_at(index).is_symbol()) return;
1649 
1650   Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
1651   Symbol*  symbol  = constant_pool->symbol_at(index);
1652 
1653   // class name?
1654   if (symbol->char_at(0) != '(') {
1655     Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1656     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1657     return;
1658   }
1659 
1660   // then it must be a signature!
1661   ResourceMark rm(THREAD);
1662   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1663     if (ss.is_object()) {
1664       Symbol* class_name = ss.as_symbol();
1665       Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1666       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1667     }
1668   }
1669 }
1670 
1671 
1672 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
1673   EXCEPTION_MARK;
1674   load_class_by_index(constant_pool, index, THREAD);
1675   if (HAS_PENDING_EXCEPTION) {
1676     // Exception happened during classloading. We ignore the exception here, since it
1677     // is going to be rethrown since the current activation is going to be deoptimized and
1678     // the interpreter will re-execute the bytecode.
1679     CLEAR_PENDING_EXCEPTION;
1680     // Class loading called java code which may have caused a stack
1681     // overflow. If the exception was thrown right before the return
1682     // to the runtime the stack is no longer guarded. Reguard the
1683     // stack otherwise if we return to the uncommon trap blob and the
1684     // stack bang causes a stack overflow we crash.
1685     assert(THREAD->is_Java_thread(), "only a java thread can be here");
1686     JavaThread* thread = (JavaThread*)THREAD;
1687     bool guard_pages_enabled = thread->stack_guards_enabled();
1688     if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1689     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1690   }
1691 }
1692 
1693 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1694   HandleMark hm;
1695 
1696   // uncommon_trap() is called at the beginning of the uncommon trap
1697   // handler. Note this fact before we start generating temporary frames
1698   // that can confuse an asynchronous stack walker. This counter is
1699   // decremented at the end of unpack_frames().
1700   thread->inc_in_deopt_handler();
1701 
1702   // We need to update the map if we have biased locking.
1703 #if INCLUDE_JVMCI
1704   // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1705   RegisterMap reg_map(thread, true);
1706 #else
1707   RegisterMap reg_map(thread, UseBiasedLocking);
1708 #endif
1709   frame stub_frame = thread->last_frame();
1710   frame fr = stub_frame.sender(&reg_map);
1711   // Make sure the calling nmethod is not getting deoptimized and removed
1712   // before we are done with it.
1713   nmethodLocker nl(fr.pc());
1714 
1715   // Log a message
1716   Events::log_deopt_message(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1717               trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1718 
1719   {
1720     ResourceMark rm;
1721 
1722     // Revoke biases of any monitors in the frame to ensure we can migrate them
1723     revoke_biases_of_monitors(thread, fr, &reg_map);
1724 
1725     DeoptReason reason = trap_request_reason(trap_request);
1726     DeoptAction action = trap_request_action(trap_request);
1727 #if INCLUDE_JVMCI
1728     int debug_id = trap_request_debug_id(trap_request);
1729 #endif
1730     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1731 
1732     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1733     compiledVFrame* cvf = compiledVFrame::cast(vf);
1734 
1735     CompiledMethod* nm = cvf->code();
1736 
1737     ScopeDesc*      trap_scope  = cvf->scope();
1738 
1739     if (TraceDeoptimization) {
1740       ttyLocker ttyl;
1741       tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1742 #if INCLUDE_JVMCI
1743           , debug_id
1744 #endif
1745           );
1746     }
1747 
1748     methodHandle    trap_method = trap_scope->method();
1749     int             trap_bci    = trap_scope->bci();
1750 #if INCLUDE_JVMCI
1751     jlong           speculation = thread->pending_failed_speculation();
1752     if (nm->is_compiled_by_jvmci() && nm->is_nmethod()) { // Exclude AOTed methods
1753       nm->as_nmethod()->update_speculation(thread);
1754     } else {
1755       assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
1756     }
1757 
1758     if (trap_bci == SynchronizationEntryBCI) {
1759       trap_bci = 0;
1760       thread->set_pending_monitorenter(true);
1761     }
1762 
1763     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1764       thread->set_pending_transfer_to_interpreter(true);
1765     }
1766 #endif
1767 
1768     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1769     // Record this event in the histogram.
1770     gather_statistics(reason, action, trap_bc);
1771 
1772     // Ensure that we can record deopt. history:
1773     // Need MDO to record RTM code generation state.
1774     bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1775 
1776     methodHandle profiled_method;
1777 #if INCLUDE_JVMCI
1778     if (nm->is_compiled_by_jvmci()) {
1779       profiled_method = nm->method();
1780     } else {
1781       profiled_method = trap_method;
1782     }
1783 #else
1784     profiled_method = trap_method;
1785 #endif
1786 
1787     MethodData* trap_mdo =
1788       get_method_data(thread, profiled_method, create_if_missing);
1789 
1790     // Log a message
1791     Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
1792                               trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
1793                               trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
1794 
1795     // Print a bunch of diagnostics, if requested.
1796     if (TraceDeoptimization || LogCompilation) {
1797       ResourceMark rm;
1798       ttyLocker ttyl;
1799       char buf[100];
1800       if (xtty != NULL) {
1801         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
1802                          os::current_thread_id(),
1803                          format_trap_request(buf, sizeof(buf), trap_request));
1804 #if INCLUDE_JVMCI
1805         if (speculation != 0) {
1806           xtty->print(" speculation='" JLONG_FORMAT "'", speculation);
1807         }
1808 #endif
1809         nm->log_identity(xtty);
1810       }
1811       Symbol* class_name = NULL;
1812       bool unresolved = false;
1813       if (unloaded_class_index >= 0) {
1814         constantPoolHandle constants (THREAD, trap_method->constants());
1815         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1816           class_name = constants->klass_name_at(unloaded_class_index);
1817           unresolved = true;
1818           if (xtty != NULL)
1819             xtty->print(" unresolved='1'");
1820         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1821           class_name = constants->symbol_at(unloaded_class_index);
1822         }
1823         if (xtty != NULL)
1824           xtty->name(class_name);
1825       }
1826       if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
1827         // Dump the relevant MDO state.
1828         // This is the deopt count for the current reason, any previous
1829         // reasons or recompiles seen at this point.
1830         int dcnt = trap_mdo->trap_count(reason);
1831         if (dcnt != 0)
1832           xtty->print(" count='%d'", dcnt);
1833         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1834         int dos = (pdata == NULL)? 0: pdata->trap_state();
1835         if (dos != 0) {
1836           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1837           if (trap_state_is_recompiled(dos)) {
1838             int recnt2 = trap_mdo->overflow_recompile_count();
1839             if (recnt2 != 0)
1840               xtty->print(" recompiles2='%d'", recnt2);
1841           }
1842         }
1843       }
1844       if (xtty != NULL) {
1845         xtty->stamp();
1846         xtty->end_head();
1847       }
1848       if (TraceDeoptimization) {  // make noise on the tty
1849         tty->print("Uncommon trap occurred in");
1850         nm->method()->print_short_name(tty);
1851         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1852 #if INCLUDE_JVMCI
1853         if (nm->is_nmethod()) {
1854           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
1855           if (installed_code_name != NULL) {
1856             tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
1857           }
1858         }
1859 #endif
1860         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1861                    p2i(fr.pc()),
1862                    os::current_thread_id(),
1863                    trap_reason_name(reason),
1864                    trap_action_name(action),
1865                    unloaded_class_index
1866 #if INCLUDE_JVMCI
1867                    , debug_id
1868 #endif
1869                    );
1870         if (class_name != NULL) {
1871           tty->print(unresolved ? " unresolved class: " : " symbol: ");
1872           class_name->print_symbol_on(tty);
1873         }
1874         tty->cr();
1875       }
1876       if (xtty != NULL) {
1877         // Log the precise location of the trap.
1878         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1879           xtty->begin_elem("jvms bci='%d'", sd->bci());
1880           xtty->method(sd->method());
1881           xtty->end_elem();
1882           if (sd->is_top())  break;
1883         }
1884         xtty->tail("uncommon_trap");
1885       }
1886     }
1887     // (End diagnostic printout.)
1888 
1889     // Load class if necessary
1890     if (unloaded_class_index >= 0) {
1891       constantPoolHandle constants(THREAD, trap_method->constants());
1892       load_class_by_index(constants, unloaded_class_index);
1893     }
1894 
1895     // Flush the nmethod if necessary and desirable.
1896     //
1897     // We need to avoid situations where we are re-flushing the nmethod
1898     // because of a hot deoptimization site.  Repeated flushes at the same
1899     // point need to be detected by the compiler and avoided.  If the compiler
1900     // cannot avoid them (or has a bug and "refuses" to avoid them), this
1901     // module must take measures to avoid an infinite cycle of recompilation
1902     // and deoptimization.  There are several such measures:
1903     //
1904     //   1. If a recompilation is ordered a second time at some site X
1905     //   and for the same reason R, the action is adjusted to 'reinterpret',
1906     //   to give the interpreter time to exercise the method more thoroughly.
1907     //   If this happens, the method's overflow_recompile_count is incremented.
1908     //
1909     //   2. If the compiler fails to reduce the deoptimization rate, then
1910     //   the method's overflow_recompile_count will begin to exceed the set
1911     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
1912     //   is adjusted to 'make_not_compilable', and the method is abandoned
1913     //   to the interpreter.  This is a performance hit for hot methods,
1914     //   but is better than a disastrous infinite cycle of recompilations.
1915     //   (Actually, only the method containing the site X is abandoned.)
1916     //
1917     //   3. In parallel with the previous measures, if the total number of
1918     //   recompilations of a method exceeds the much larger set limit
1919     //   PerMethodRecompilationCutoff, the method is abandoned.
1920     //   This should only happen if the method is very large and has
1921     //   many "lukewarm" deoptimizations.  The code which enforces this
1922     //   limit is elsewhere (class nmethod, class Method).
1923     //
1924     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1925     // to recompile at each bytecode independently of the per-BCI cutoff.
1926     //
1927     // The decision to update code is up to the compiler, and is encoded
1928     // in the Action_xxx code.  If the compiler requests Action_none
1929     // no trap state is changed, no compiled code is changed, and the
1930     // computation suffers along in the interpreter.
1931     //
1932     // The other action codes specify various tactics for decompilation
1933     // and recompilation.  Action_maybe_recompile is the loosest, and
1934     // allows the compiled code to stay around until enough traps are seen,
1935     // and until the compiler gets around to recompiling the trapping method.
1936     //
1937     // The other actions cause immediate removal of the present code.
1938 
1939     // Traps caused by injected profile shouldn't pollute trap counts.
1940     bool injected_profile_trap = trap_method->has_injected_profile() &&
1941                                  (reason == Reason_intrinsic || reason == Reason_unreached);
1942 
1943     bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1944     bool make_not_entrant = false;
1945     bool make_not_compilable = false;
1946     bool reprofile = false;
1947     switch (action) {
1948     case Action_none:
1949       // Keep the old code.
1950       update_trap_state = false;
1951       break;
1952     case Action_maybe_recompile:
1953       // Do not need to invalidate the present code, but we can
1954       // initiate another
1955       // Start compiler without (necessarily) invalidating the nmethod.
1956       // The system will tolerate the old code, but new code should be
1957       // generated when possible.
1958       break;
1959     case Action_reinterpret:
1960       // Go back into the interpreter for a while, and then consider
1961       // recompiling form scratch.
1962       make_not_entrant = true;
1963       // Reset invocation counter for outer most method.
1964       // This will allow the interpreter to exercise the bytecodes
1965       // for a while before recompiling.
1966       // By contrast, Action_make_not_entrant is immediate.
1967       //
1968       // Note that the compiler will track null_check, null_assert,
1969       // range_check, and class_check events and log them as if they
1970       // had been traps taken from compiled code.  This will update
1971       // the MDO trap history so that the next compilation will
1972       // properly detect hot trap sites.
1973       reprofile = true;
1974       break;
1975     case Action_make_not_entrant:
1976       // Request immediate recompilation, and get rid of the old code.
1977       // Make them not entrant, so next time they are called they get
1978       // recompiled.  Unloaded classes are loaded now so recompile before next
1979       // time they are called.  Same for uninitialized.  The interpreter will
1980       // link the missing class, if any.
1981       make_not_entrant = true;
1982       break;
1983     case Action_make_not_compilable:
1984       // Give up on compiling this method at all.
1985       make_not_entrant = true;
1986       make_not_compilable = true;
1987       break;
1988     default:
1989       ShouldNotReachHere();
1990     }
1991 
1992     // Setting +ProfileTraps fixes the following, on all platforms:
1993     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
1994     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1995     // recompile relies on a MethodData* to record heroic opt failures.
1996 
1997     // Whether the interpreter is producing MDO data or not, we also need
1998     // to use the MDO to detect hot deoptimization points and control
1999     // aggressive optimization.
2000     bool inc_recompile_count = false;
2001     ProfileData* pdata = NULL;
2002     if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
2003       assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
2004       uint this_trap_count = 0;
2005       bool maybe_prior_trap = false;
2006       bool maybe_prior_recompile = false;
2007       pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
2008 #if INCLUDE_JVMCI
2009                                    nm->is_compiled_by_jvmci() && nm->is_osr_method(),
2010 #endif
2011                                    nm->method(),
2012                                    //outputs:
2013                                    this_trap_count,
2014                                    maybe_prior_trap,
2015                                    maybe_prior_recompile);
2016       // Because the interpreter also counts null, div0, range, and class
2017       // checks, these traps from compiled code are double-counted.
2018       // This is harmless; it just means that the PerXTrapLimit values
2019       // are in effect a little smaller than they look.
2020 
2021       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2022       if (per_bc_reason != Reason_none) {
2023         // Now take action based on the partially known per-BCI history.
2024         if (maybe_prior_trap
2025             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
2026           // If there are too many traps at this BCI, force a recompile.
2027           // This will allow the compiler to see the limit overflow, and
2028           // take corrective action, if possible.  The compiler generally
2029           // does not use the exact PerBytecodeTrapLimit value, but instead
2030           // changes its tactics if it sees any traps at all.  This provides
2031           // a little hysteresis, delaying a recompile until a trap happens
2032           // several times.
2033           //
2034           // Actually, since there is only one bit of counter per BCI,
2035           // the possible per-BCI counts are {0,1,(per-method count)}.
2036           // This produces accurate results if in fact there is only
2037           // one hot trap site, but begins to get fuzzy if there are
2038           // many sites.  For example, if there are ten sites each
2039           // trapping two or more times, they each get the blame for
2040           // all of their traps.
2041           make_not_entrant = true;
2042         }
2043 
2044         // Detect repeated recompilation at the same BCI, and enforce a limit.
2045         if (make_not_entrant && maybe_prior_recompile) {
2046           // More than one recompile at this point.
2047           inc_recompile_count = maybe_prior_trap;
2048         }
2049       } else {
2050         // For reasons which are not recorded per-bytecode, we simply
2051         // force recompiles unconditionally.
2052         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
2053         make_not_entrant = true;
2054       }
2055 
2056       // Go back to the compiler if there are too many traps in this method.
2057       if (this_trap_count >= per_method_trap_limit(reason)) {
2058         // If there are too many traps in this method, force a recompile.
2059         // This will allow the compiler to see the limit overflow, and
2060         // take corrective action, if possible.
2061         // (This condition is an unlikely backstop only, because the
2062         // PerBytecodeTrapLimit is more likely to take effect first,
2063         // if it is applicable.)
2064         make_not_entrant = true;
2065       }
2066 
2067       // Here's more hysteresis:  If there has been a recompile at
2068       // this trap point already, run the method in the interpreter
2069       // for a while to exercise it more thoroughly.
2070       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
2071         reprofile = true;
2072       }
2073     }
2074 
2075     // Take requested actions on the method:
2076 
2077     // Recompile
2078     if (make_not_entrant) {
2079       if (!nm->make_not_entrant()) {
2080         return; // the call did not change nmethod's state
2081       }
2082 
2083       if (pdata != NULL) {
2084         // Record the recompilation event, if any.
2085         int tstate0 = pdata->trap_state();
2086         int tstate1 = trap_state_set_recompiled(tstate0, true);
2087         if (tstate1 != tstate0)
2088           pdata->set_trap_state(tstate1);
2089       }
2090 
2091 #if INCLUDE_RTM_OPT
2092       // Restart collecting RTM locking abort statistic if the method
2093       // is recompiled for a reason other than RTM state change.
2094       // Assume that in new recompiled code the statistic could be different,
2095       // for example, due to different inlining.
2096       if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
2097           UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
2098         trap_mdo->atomic_set_rtm_state(ProfileRTM);
2099       }
2100 #endif
2101       // For code aging we count traps separately here, using make_not_entrant()
2102       // as a guard against simultaneous deopts in multiple threads.
2103       if (reason == Reason_tenured && trap_mdo != NULL) {
2104         trap_mdo->inc_tenure_traps();
2105       }
2106     }
2107 
2108     if (inc_recompile_count) {
2109       trap_mdo->inc_overflow_recompile_count();
2110       if ((uint)trap_mdo->overflow_recompile_count() >
2111           (uint)PerBytecodeRecompilationCutoff) {
2112         // Give up on the method containing the bad BCI.
2113         if (trap_method() == nm->method()) {
2114           make_not_compilable = true;
2115         } else {
2116           trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization);
2117           // But give grace to the enclosing nm->method().
2118         }
2119       }
2120     }
2121 
2122     // Reprofile
2123     if (reprofile) {
2124       CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
2125     }
2126 
2127     // Give up compiling
2128     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
2129       assert(make_not_entrant, "consistent");
2130       nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization);
2131     }
2132 
2133   } // Free marked resources
2134 
2135 }
2136 JRT_END
2137 
2138 ProfileData*
2139 Deoptimization::query_update_method_data(MethodData* trap_mdo,
2140                                          int trap_bci,
2141                                          Deoptimization::DeoptReason reason,
2142                                          bool update_total_trap_count,
2143 #if INCLUDE_JVMCI
2144                                          bool is_osr,
2145 #endif
2146                                          Method* compiled_method,
2147                                          //outputs:
2148                                          uint& ret_this_trap_count,
2149                                          bool& ret_maybe_prior_trap,
2150                                          bool& ret_maybe_prior_recompile) {
2151   bool maybe_prior_trap = false;
2152   bool maybe_prior_recompile = false;
2153   uint this_trap_count = 0;
2154   if (update_total_trap_count) {
2155     uint idx = reason;
2156 #if INCLUDE_JVMCI
2157     if (is_osr) {
2158       idx += Reason_LIMIT;
2159     }
2160 #endif
2161     uint prior_trap_count = trap_mdo->trap_count(idx);
2162     this_trap_count  = trap_mdo->inc_trap_count(idx);
2163 
2164     // If the runtime cannot find a place to store trap history,
2165     // it is estimated based on the general condition of the method.
2166     // If the method has ever been recompiled, or has ever incurred
2167     // a trap with the present reason , then this BCI is assumed
2168     // (pessimistically) to be the culprit.
2169     maybe_prior_trap      = (prior_trap_count != 0);
2170     maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
2171   }
2172   ProfileData* pdata = NULL;
2173 
2174 
2175   // For reasons which are recorded per bytecode, we check per-BCI data.
2176   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2177   assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
2178   if (per_bc_reason != Reason_none) {
2179     // Find the profile data for this BCI.  If there isn't one,
2180     // try to allocate one from the MDO's set of spares.
2181     // This will let us detect a repeated trap at this point.
2182     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
2183 
2184     if (pdata != NULL) {
2185       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2186         if (LogCompilation && xtty != NULL) {
2187           ttyLocker ttyl;
2188           // no more room for speculative traps in this MDO
2189           xtty->elem("speculative_traps_oom");
2190         }
2191       }
2192       // Query the trap state of this profile datum.
2193       int tstate0 = pdata->trap_state();
2194       if (!trap_state_has_reason(tstate0, per_bc_reason))
2195         maybe_prior_trap = false;
2196       if (!trap_state_is_recompiled(tstate0))
2197         maybe_prior_recompile = false;
2198 
2199       // Update the trap state of this profile datum.
2200       int tstate1 = tstate0;
2201       // Record the reason.
2202       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2203       // Store the updated state on the MDO, for next time.
2204       if (tstate1 != tstate0)
2205         pdata->set_trap_state(tstate1);
2206     } else {
2207       if (LogCompilation && xtty != NULL) {
2208         ttyLocker ttyl;
2209         // Missing MDP?  Leave a small complaint in the log.
2210         xtty->elem("missing_mdp bci='%d'", trap_bci);
2211       }
2212     }
2213   }
2214 
2215   // Return results:
2216   ret_this_trap_count = this_trap_count;
2217   ret_maybe_prior_trap = maybe_prior_trap;
2218   ret_maybe_prior_recompile = maybe_prior_recompile;
2219   return pdata;
2220 }
2221 
2222 void
2223 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2224   ResourceMark rm;
2225   // Ignored outputs:
2226   uint ignore_this_trap_count;
2227   bool ignore_maybe_prior_trap;
2228   bool ignore_maybe_prior_recompile;
2229   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2230   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2231   bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2232   query_update_method_data(trap_mdo, trap_bci,
2233                            (DeoptReason)reason,
2234                            update_total_counts,
2235 #if INCLUDE_JVMCI
2236                            false,
2237 #endif
2238                            NULL,
2239                            ignore_this_trap_count,
2240                            ignore_maybe_prior_trap,
2241                            ignore_maybe_prior_recompile);
2242 }
2243 
2244 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
2245   if (TraceDeoptimization) {
2246     tty->print("Uncommon trap ");
2247   }
2248   // Still in Java no safepoints
2249   {
2250     // This enters VM and may safepoint
2251     uncommon_trap_inner(thread, trap_request);
2252   }
2253   return fetch_unroll_info_helper(thread, exec_mode);
2254 }
2255 
2256 // Local derived constants.
2257 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2258 const int DS_REASON_MASK   = ((uint)DataLayout::trap_mask) >> 1;
2259 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
2260 
2261 //---------------------------trap_state_reason---------------------------------
2262 Deoptimization::DeoptReason
2263 Deoptimization::trap_state_reason(int trap_state) {
2264   // This assert provides the link between the width of DataLayout::trap_bits
2265   // and the encoding of "recorded" reasons.  It ensures there are enough
2266   // bits to store all needed reasons in the per-BCI MDO profile.
2267   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2268   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2269   trap_state -= recompile_bit;
2270   if (trap_state == DS_REASON_MASK) {
2271     return Reason_many;
2272   } else {
2273     assert((int)Reason_none == 0, "state=0 => Reason_none");
2274     return (DeoptReason)trap_state;
2275   }
2276 }
2277 //-------------------------trap_state_has_reason-------------------------------
2278 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2279   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2280   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2281   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2282   trap_state -= recompile_bit;
2283   if (trap_state == DS_REASON_MASK) {
2284     return -1;  // true, unspecifically (bottom of state lattice)
2285   } else if (trap_state == reason) {
2286     return 1;   // true, definitely
2287   } else if (trap_state == 0) {
2288     return 0;   // false, definitely (top of state lattice)
2289   } else {
2290     return 0;   // false, definitely
2291   }
2292 }
2293 //-------------------------trap_state_add_reason-------------------------------
2294 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2295   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2296   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2297   trap_state -= recompile_bit;
2298   if (trap_state == DS_REASON_MASK) {
2299     return trap_state + recompile_bit;     // already at state lattice bottom
2300   } else if (trap_state == reason) {
2301     return trap_state + recompile_bit;     // the condition is already true
2302   } else if (trap_state == 0) {
2303     return reason + recompile_bit;          // no condition has yet been true
2304   } else {
2305     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
2306   }
2307 }
2308 //-----------------------trap_state_is_recompiled------------------------------
2309 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2310   return (trap_state & DS_RECOMPILE_BIT) != 0;
2311 }
2312 //-----------------------trap_state_set_recompiled-----------------------------
2313 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2314   if (z)  return trap_state |  DS_RECOMPILE_BIT;
2315   else    return trap_state & ~DS_RECOMPILE_BIT;
2316 }
2317 //---------------------------format_trap_state---------------------------------
2318 // This is used for debugging and diagnostics, including LogFile output.
2319 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2320                                               int trap_state) {
2321   assert(buflen > 0, "sanity");
2322   DeoptReason reason      = trap_state_reason(trap_state);
2323   bool        recomp_flag = trap_state_is_recompiled(trap_state);
2324   // Re-encode the state from its decoded components.
2325   int decoded_state = 0;
2326   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2327     decoded_state = trap_state_add_reason(decoded_state, reason);
2328   if (recomp_flag)
2329     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2330   // If the state re-encodes properly, format it symbolically.
2331   // Because this routine is used for debugging and diagnostics,
2332   // be robust even if the state is a strange value.
2333   size_t len;
2334   if (decoded_state != trap_state) {
2335     // Random buggy state that doesn't decode??
2336     len = jio_snprintf(buf, buflen, "#%d", trap_state);
2337   } else {
2338     len = jio_snprintf(buf, buflen, "%s%s",
2339                        trap_reason_name(reason),
2340                        recomp_flag ? " recompiled" : "");
2341   }
2342   return buf;
2343 }
2344 
2345 
2346 //--------------------------------statics--------------------------------------
2347 const char* Deoptimization::_trap_reason_name[] = {
2348   // Note:  Keep this in sync. with enum DeoptReason.
2349   "none",
2350   "null_check",
2351   "null_assert" JVMCI_ONLY("_or_unreached0"),
2352   "range_check",
2353   "class_check",
2354   "array_check",
2355   "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2356   "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2357   "profile_predicate",
2358   "unloaded",
2359   "uninitialized",
2360   "initialized",
2361   "unreached",
2362   "unhandled",
2363   "constraint",
2364   "div0_check",
2365   "age",
2366   "predicate",
2367   "loop_limit_check",
2368   "speculate_class_check",
2369   "speculate_null_check",
2370   "speculate_null_assert",
2371   "rtm_state_change",
2372   "unstable_if",
2373   "unstable_fused_if",
2374 #if INCLUDE_JVMCI
2375   "aliasing",
2376   "transfer_to_interpreter",
2377   "not_compiled_exception_handler",
2378   "unresolved",
2379   "jsr_mismatch",
2380 #endif
2381   "tenured"
2382 };
2383 const char* Deoptimization::_trap_action_name[] = {
2384   // Note:  Keep this in sync. with enum DeoptAction.
2385   "none",
2386   "maybe_recompile",
2387   "reinterpret",
2388   "make_not_entrant",
2389   "make_not_compilable"
2390 };
2391 
2392 const char* Deoptimization::trap_reason_name(int reason) {
2393   // Check that every reason has a name
2394   STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2395 
2396   if (reason == Reason_many)  return "many";
2397   if ((uint)reason < Reason_LIMIT)
2398     return _trap_reason_name[reason];
2399   static char buf[20];
2400   sprintf(buf, "reason%d", reason);
2401   return buf;
2402 }
2403 const char* Deoptimization::trap_action_name(int action) {
2404   // Check that every action has a name
2405   STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2406 
2407   if ((uint)action < Action_LIMIT)
2408     return _trap_action_name[action];
2409   static char buf[20];
2410   sprintf(buf, "action%d", action);
2411   return buf;
2412 }
2413 
2414 // This is used for debugging and diagnostics, including LogFile output.
2415 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2416                                                 int trap_request) {
2417   jint unloaded_class_index = trap_request_index(trap_request);
2418   const char* reason = trap_reason_name(trap_request_reason(trap_request));
2419   const char* action = trap_action_name(trap_request_action(trap_request));
2420 #if INCLUDE_JVMCI
2421   int debug_id = trap_request_debug_id(trap_request);
2422 #endif
2423   size_t len;
2424   if (unloaded_class_index < 0) {
2425     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2426                        reason, action
2427 #if INCLUDE_JVMCI
2428                        ,debug_id
2429 #endif
2430                        );
2431   } else {
2432     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2433                        reason, action, unloaded_class_index
2434 #if INCLUDE_JVMCI
2435                        ,debug_id
2436 #endif
2437                        );
2438   }
2439   return buf;
2440 }
2441 
2442 juint Deoptimization::_deoptimization_hist
2443         [Deoptimization::Reason_LIMIT]
2444     [1 + Deoptimization::Action_LIMIT]
2445         [Deoptimization::BC_CASE_LIMIT]
2446   = {0};
2447 
2448 enum {
2449   LSB_BITS = 8,
2450   LSB_MASK = right_n_bits(LSB_BITS)
2451 };
2452 
2453 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2454                                        Bytecodes::Code bc) {
2455   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2456   assert(action >= 0 && action < Action_LIMIT, "oob");
2457   _deoptimization_hist[Reason_none][0][0] += 1;  // total
2458   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
2459   juint* cases = _deoptimization_hist[reason][1+action];
2460   juint* bc_counter_addr = NULL;
2461   juint  bc_counter      = 0;
2462   // Look for an unused counter, or an exact match to this BC.
2463   if (bc != Bytecodes::_illegal) {
2464     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2465       juint* counter_addr = &cases[bc_case];
2466       juint  counter = *counter_addr;
2467       if ((counter == 0 && bc_counter_addr == NULL)
2468           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2469         // this counter is either free or is already devoted to this BC
2470         bc_counter_addr = counter_addr;
2471         bc_counter = counter | bc;
2472       }
2473     }
2474   }
2475   if (bc_counter_addr == NULL) {
2476     // Overflow, or no given bytecode.
2477     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2478     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
2479   }
2480   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2481 }
2482 
2483 jint Deoptimization::total_deoptimization_count() {
2484   return _deoptimization_hist[Reason_none][0][0];
2485 }
2486 
2487 void Deoptimization::print_statistics() {
2488   juint total = total_deoptimization_count();
2489   juint account = total;
2490   if (total != 0) {
2491     ttyLocker ttyl;
2492     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
2493     tty->print_cr("Deoptimization traps recorded:");
2494     #define PRINT_STAT_LINE(name, r) \
2495       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2496     PRINT_STAT_LINE("total", total);
2497     // For each non-zero entry in the histogram, print the reason,
2498     // the action, and (if specifically known) the type of bytecode.
2499     for (int reason = 0; reason < Reason_LIMIT; reason++) {
2500       for (int action = 0; action < Action_LIMIT; action++) {
2501         juint* cases = _deoptimization_hist[reason][1+action];
2502         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2503           juint counter = cases[bc_case];
2504           if (counter != 0) {
2505             char name[1*K];
2506             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2507             if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2508               bc = Bytecodes::_illegal;
2509             sprintf(name, "%s/%s/%s",
2510                     trap_reason_name(reason),
2511                     trap_action_name(action),
2512                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2513             juint r = counter >> LSB_BITS;
2514             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2515             account -= r;
2516           }
2517         }
2518       }
2519     }
2520     if (account != 0) {
2521       PRINT_STAT_LINE("unaccounted", account);
2522     }
2523     #undef PRINT_STAT_LINE
2524     if (xtty != NULL)  xtty->tail("statistics");
2525   }
2526 }
2527 #else // COMPILER2_OR_JVMCI
2528 
2529 
2530 // Stubs for C1 only system.
2531 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2532   return false;
2533 }
2534 
2535 const char* Deoptimization::trap_reason_name(int reason) {
2536   return "unknown";
2537 }
2538 
2539 void Deoptimization::print_statistics() {
2540   // no output
2541 }
2542 
2543 void
2544 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2545   // no udpate
2546 }
2547 
2548 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2549   return 0;
2550 }
2551 
2552 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2553                                        Bytecodes::Code bc) {
2554   // no update
2555 }
2556 
2557 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2558                                               int trap_state) {
2559   jio_snprintf(buf, buflen, "#%d", trap_state);
2560   return buf;
2561 }
2562 
2563 #endif // COMPILER2_OR_JVMCI