/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef COMPILER2
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif
#endif

bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  caller_actual_parameters,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
  _return_type               = return_type;
  _initial_fp                = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = 0;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
}


Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
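  // Layout note: the block holds two words per saved register
  // (RegisterMap::reg_count pairs), so value_addr_at(3), for example,
  // returns &_register_block[6], the first word of register 3's pair.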
  return &_register_block[register_number * 2];
}



int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print("%d ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
  thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame.  This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

#ifdef COMPILER2
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (DoEscapeAnalysis) {
    if (EliminateAllocations) {
      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return an oop
      // in compiled code. Such sites include java method calls,
      // runtime calls (for example, used to allocate new objects/arrays
      // on the slow code path) and any other calls generated in compiled code.
      // It is not guaranteed that we can get such information here only
      // by analyzing bytecode in deoptimized frames. This is why this flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      bool save_oop_result = chunk->at(0)->scope()->return_oop();
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // a call which returns an oop, we need to save it since it is not in the oopmap.
        oop result = deoptee.saved_oop_result(&map);
        assert(result == NULL || result->is_oop(), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
        }
      }
      bool reallocated = false;
      if (objects != NULL) {
        JRT_BLOCK
          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
        JRT_END
      }
      if (reallocated) {
        reassign_fields(&deoptee, &map, objects);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
          print_objects(objects);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
    }
    if (EliminateLocks) {
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert (cvf->scope() != NULL,"expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread);
#ifndef PRODUCT
          if (TraceDeoptimization) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                }
                tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
              }
            }
          }
#endif
        }
      }
    }
  }
#endif // COMPILER2
  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects.  If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  No_Safepoint_Verifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray.

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }
  }

#ifndef SHARK
  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
  Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
#endif
#else
  intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
#endif // !SHARK

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
  int callee_parameters = 0;
  int callee_locals = 0;
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than the number of arguments in the callee when method
  // handles are used.  If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  int caller_actual_parameters = callee_parameters;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method = deopt_sender.interpreter_frame_method();
    Bytecode_invoke cur = Bytecode_invoke_check(method,
                                                deopt_sender.interpreter_frame_bci());
    Symbol* signature = method->constants()->signature_ref_at(cur.index());
    ArgumentSizeComputer asc(signature);
    caller_actual_parameters = asc.size() + (cur.has_receiver() ? 1 : 0);
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller).  Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
  // so things look a little strange in this loop.
  //
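  // For example, with three virtual frames (array indices 0..2, youngest
  // first), the loop below stores the youngest frame's size in frame_sizes[2]
  // and the oldest (root) frame's size in frame_sizes[0].
  //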
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    int caller_parms = callee_parameters;
    if (index == array->frames() - 1) {
      // Use the value from the interpreted caller
      caller_parms = caller_actual_parameters;
    }
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
                                                                                                    callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect, just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable.
    // The correct pc will be set when the skeleton frame is completely filled out;
    // the final pc we store in the loop is wrong and will be overwritten below.
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    HandleMark hm;
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).
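  // A sketch of the arithmetic (assuming one word per interpreter stack slot):
  // if the oldest interpreted frame has max_locals == 6 but its interpreted
  // caller pushed only 2 parameter words, the caller's frame is extended by
  // last_frame_adjust(2, 6) == 4 words; with a compiled caller the full
  // last_frame_adjust(0, 6) == 6 words is used instead.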

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame()) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > caller_actual_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(caller_actual_parameters, callee_locals);
  }

  // If the sender is deoptimized then we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

#ifndef SHARK
  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
#endif // SHARK

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_actual_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_fp((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to cleanup deoptimization data structures in the normal case
// after unpacking to the stack and when a stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_nmethod(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // The C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
        thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}


// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark; any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
  }
#endif

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual ||
          cur_code == Bytecodes::_invokespecial ||
          cur_code == Bytecodes::_invokestatic  ||
          cur_code == Bytecodes::_invokeinterface) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        Symbol* signature = invoke.signature();
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (cur_code != Bytecodes::_invokestatic) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END


int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}


#ifdef COMPILER2
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    oop obj = NULL;

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      obj = ik->allocate_instance(CHECK_(false));
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, CHECK_(false));
    } else if (k->oop_is_objArray()) {
      objArrayKlass* ak = objArrayKlass::cast(k());
      obj = ak->allocate(sv->field_size(), CHECK_(false));
    }

    assert(obj != NULL, "allocation failed");
    assert(sv->value().is_null(), "redundant reallocation");
    sv->set_value(obj);
  }

  if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return true;
}

// This assumes that the fields are stored in ObjectValue in the same order
// they are yielded by do_nonstatic_fields.
class FieldReassigner: public FieldClosure {
  frame* _fr;
  RegisterMap* _reg_map;
  ObjectValue* _sv;
  instanceKlass* _ik;
  oop _obj;

  int _i;
public:
  FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
    _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}

  int i() const { return _i; }


  void do_field(fieldDescriptor* fd) {
    intptr_t val;
    StackValue* value =
      StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
    int offset = fd->offset();
    switch (fd->field_type()) {
    case T_OBJECT: case T_ARRAY:
      assert(value->type() == T_OBJECT, "Agreement.");
      _obj->obj_field_put(offset, value->get_obj()());
      break;

    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
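      // On 32-bit targets jlong_from(hi, lo) reassembles the 64-bit value from
      // two 32-bit stack slots: `value` supplies the high word and `low` the
      // low word (swapped on SPARC, as noted above). On _LP64 targets the low
      // slot already carries the complete 64-bit value.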
      _obj->long_field_put(offset, res);
      break;
    }
    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->int_field_put(offset, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->short_field_put(offset, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->bool_field_put(offset, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    _i++;
  }
};

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->int_at_put(index, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}


// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();
    assert(obj.not_null(), "reallocation was missed");

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      FieldReassigner reassign(fr, reg_map, sv, obj());
      ik->do_nonstatic_fields(&reassign);
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->oop_is_objArray()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(mon_info->owner() != NULL, "reallocation was missed");
      Handle obj = Handle(mon_info->owner());
      markOop mark = obj->mark();
      if (UseBiasedLocking && mark->has_bias_pattern()) {
        // Newly allocated objects may have the mark set to anonymously biased.
        // Also the deoptimized method may have called methods with synchronization
        // where the thread-local object is bias-locked to the current thread.
        assert(mark->is_biased_anonymously() ||
               mark->biased_locker() == thread, "should be locked to current thread");
        // Reset mark word to unbiased prototype.
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        obj->set_mark(unbiased_prototype);
      }
      BasicLock* lock = mon_info->lock();
      ObjectSynchronizer::slow_enter(obj, lock, thread);
    }
    assert(mon_info->owner()->is_locked(), "object must be locked now");
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
    k->as_klassOop()->print_value();
    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    tty->cr();

    if (Verbose) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl).  We capture
  // the state of the deopt'ing frame's caller.  Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");
  Events::log("# vframes = %d", (intptr_t)chunk->length());

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
  }
#endif // PRODUCT

  return array;
}


static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(mon_info->owner()));
    }
  }
}


void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}


void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}


void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);

  EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());

  // Patch the nmethod so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize a frame which has already been patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr);
}


void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id) {
  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "can only deoptimize other thread at a safepoint");
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map);
}


void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  if (thread == Thread::current()) {
    Deoptimization::deoptimize_frame_internal(thread, id);
  } else {
    VM_DeoptimizeFrame deopt(thread, id);
    VMThread::execute(&deopt);
  }
}


// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END


#if defined(COMPILER2) || defined(SHARK)
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  // In case of an unresolved klass entry, load the class.
  if (constant_pool->tag_at(index).is_unresolved_klass()) {
    klassOop tk = constant_pool->klass_at(index, CHECK);
    return;
  }

  if (!constant_pool->tag_at(index).is_symbol()) return;

  Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
  Symbol*  symbol  = constant_pool->symbol_at(index);

  // class name?
  if (symbol->byte_at(0) != '(') {
    Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
    return;
  }

  // then it must be a signature!
  ResourceMark rm(THREAD);
  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
    if (ss.is_object()) {
      Symbol* class_name = ss.as_symbol(CHECK);
      Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
    }
  }
}


void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  EXCEPTION_MARK;
  load_class_by_index(constant_pool, index, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Exception happened during classloading. We ignore the exception here, since it
    // is going to be rethrown since the current activation is going to be deoptimized and
    // the interpreter will re-execute the bytecode.
    CLEAR_PENDING_EXCEPTION;
  }
}

JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  HandleMark hm;

  // uncommon_trap() is called at the beginning of the uncommon trap
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  // We need to update the map if we have biased locking.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame stub_frame = thread->last_frame();
  frame fr = stub_frame.sender(&reg_map);
  // Make sure the calling nmethod is not getting deoptimized and removed
  // before we are done with it.
  nmethodLocker nl(fr.pc());

  {
    ResourceMark rm;

    // Revoke biases of any monitors in the frame to ensure we can migrate them
    revoke_biases_of_monitors(thread, fr, &reg_map);

    DeoptReason reason = trap_request_reason(trap_request);
    DeoptAction action = trap_request_action(trap_request);
    jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1

    Events::log("Uncommon trap occurred @" INTPTR_FORMAT " trap_request = %d", fr.pc(), (int) trap_request);
1252     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1253     compiledVFrame* cvf = compiledVFrame::cast(vf);
1254 
1255     nmethod* nm = cvf->code();
1256 
1257     ScopeDesc*      trap_scope  = cvf->scope();
1258     methodHandle    trap_method = trap_scope->method();
1259     int             trap_bci    = trap_scope->bci();
1260     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1261 
1262     // Record this event in the histogram.
1263     gather_statistics(reason, action, trap_bc);
1264 
1265     // Ensure that we can record deopt. history:
1266     bool create_if_missing = ProfileTraps;
1267 
1268     methodDataHandle trap_mdo
1269       (THREAD, get_method_data(thread, trap_method, create_if_missing));
1270 
1271     // Print a bunch of diagnostics, if requested.
1272     if (TraceDeoptimization || LogCompilation) {
1273       ResourceMark rm;
1274       ttyLocker ttyl;
1275       char buf[100];
1276       if (xtty != NULL) {
1277         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
1278                          os::current_thread_id(),
1279                          format_trap_request(buf, sizeof(buf), trap_request));
1280         nm->log_identity(xtty);
1281       }
1282       Symbol* class_name = NULL;
1283       bool unresolved = false;
1284       if (unloaded_class_index >= 0) {
1285         constantPoolHandle constants (THREAD, trap_method->constants());
1286         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1287           class_name = constants->klass_name_at(unloaded_class_index);
1288           unresolved = true;
1289           if (xtty != NULL)
1290             xtty->print(" unresolved='1'");
1291         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1292           class_name = constants->symbol_at(unloaded_class_index);
1293         }
1294         if (xtty != NULL)
1295           xtty->name(class_name);
1296       }
1297       if (xtty != NULL && trap_mdo.not_null()) {
1298         // Dump the relevant MDO state.
        // This is the deopt count for the current reason; prior reasons
        // and recompiles at this BCI are reported via the trap state below.
1301         int dcnt = trap_mdo->trap_count(reason);
1302         if (dcnt != 0)
1303           xtty->print(" count='%d'", dcnt);
1304         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1305         int dos = (pdata == NULL)? 0: pdata->trap_state();
1306         if (dos != 0) {
1307           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1308           if (trap_state_is_recompiled(dos)) {
1309             int recnt2 = trap_mdo->overflow_recompile_count();
1310             if (recnt2 != 0)
1311               xtty->print(" recompiles2='%d'", recnt2);
1312           }
1313         }
1314       }
1315       if (xtty != NULL) {
1316         xtty->stamp();
1317         xtty->end_head();
1318       }
1319       if (TraceDeoptimization) {  // make noise on the tty
1320         tty->print("Uncommon trap occurred in");
1321         nm->method()->print_short_name(tty);
1322         tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
1323                    fr.pc(),
1324                    (int) os::current_thread_id(),
1325                    trap_reason_name(reason),
1326                    trap_action_name(action),
1327                    unloaded_class_index);
1328         if (class_name != NULL) {
1329           tty->print(unresolved ? " unresolved class: " : " symbol: ");
1330           class_name->print_symbol_on(tty);
1331         }
1332         tty->cr();
1333       }
1334       if (xtty != NULL) {
1335         // Log the precise location of the trap.
1336         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1337           xtty->begin_elem("jvms bci='%d'", sd->bci());
1338           xtty->method(sd->method());
1339           xtty->end_elem();
1340           if (sd->is_top())  break;
1341         }
1342         xtty->tail("uncommon_trap");
1343       }
1344     }
1345     // (End diagnostic printout.)
1346 
1347     // Load class if necessary
1348     if (unloaded_class_index >= 0) {
1349       constantPoolHandle constants(THREAD, trap_method->constants());
1350       load_class_by_index(constants, unloaded_class_index);
1351     }
1352 
1353     // Flush the nmethod if necessary and desirable.
1354     //
1355     // We need to avoid situations where we are re-flushing the nmethod
1356     // because of a hot deoptimization site.  Repeated flushes at the same
1357     // point need to be detected by the compiler and avoided.  If the compiler
1358     // cannot avoid them (or has a bug and "refuses" to avoid them), this
1359     // module must take measures to avoid an infinite cycle of recompilation
1360     // and deoptimization.  There are several such measures:
1361     //
1362     //   1. If a recompilation is ordered a second time at some site X
1363     //   and for the same reason R, the action is adjusted to 'reinterpret',
1364     //   to give the interpreter time to exercise the method more thoroughly.
1365     //   If this happens, the method's overflow_recompile_count is incremented.
1366     //
1367     //   2. If the compiler fails to reduce the deoptimization rate, then
1368     //   the method's overflow_recompile_count will begin to exceed the set
1369     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
1370     //   is adjusted to 'make_not_compilable', and the method is abandoned
1371     //   to the interpreter.  This is a performance hit for hot methods,
1372     //   but is better than a disastrous infinite cycle of recompilations.
1373     //   (Actually, only the method containing the site X is abandoned.)
1374     //
1375     //   3. In parallel with the previous measures, if the total number of
1376     //   recompilations of a method exceeds the much larger set limit
1377     //   PerMethodRecompilationCutoff, the method is abandoned.
1378     //   This should only happen if the method is very large and has
1379     //   many "lukewarm" deoptimizations.  The code which enforces this
1380     //   limit is elsewhere (class nmethod, class methodOopDesc).
1381     //
1382     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1383     // to recompile at each bytecode independently of the per-BCI cutoff.
1384     //
1385     // The decision to update code is up to the compiler, and is encoded
1386     // in the Action_xxx code.  If the compiler requests Action_none
1387     // no trap state is changed, no compiled code is changed, and the
1388     // computation suffers along in the interpreter.
1389     //
1390     // The other action codes specify various tactics for decompilation
1391     // and recompilation.  Action_maybe_recompile is the loosest, and
1392     // allows the compiled code to stay around until enough traps are seen,
1393     // and until the compiler gets around to recompiling the trapping method.
1394     //
1395     // The other actions cause immediate removal of the present code.
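    //
    // In summary (an illustrative sketch; the authoritative flow is the
    // code below):
    //   repeated trap at site X for reason R      -> action 'reinterpret'
    //   overflow_recompile_count exceeds cutoff   -> 'make_not_compilable'
    //   total recompiles exceed per-method limit  -> abandoned (enforced elsewhere)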
1396 
1397     bool update_trap_state = true;
1398     bool make_not_entrant = false;
1399     bool make_not_compilable = false;
1400     bool reprofile = false;
1401     switch (action) {
1402     case Action_none:
1403       // Keep the old code.
1404       update_trap_state = false;
1405       break;
    case Action_maybe_recompile:
      // Do not invalidate the present code, but we may initiate another
      // compilation.  Start the compiler without (necessarily)
      // invalidating the nmethod.  The system will tolerate the old code,
      // but new code should be generated when possible.
      break;
1413     case Action_reinterpret:
      // Go back into the interpreter for a while, and then consider
      // recompiling from scratch.
      make_not_entrant = true;
      // Reset the invocation counter for the outermost method.
1418       // This will allow the interpreter to exercise the bytecodes
1419       // for a while before recompiling.
1420       // By contrast, Action_make_not_entrant is immediate.
1421       //
1422       // Note that the compiler will track null_check, null_assert,
1423       // range_check, and class_check events and log them as if they
1424       // had been traps taken from compiled code.  This will update
1425       // the MDO trap history so that the next compilation will
1426       // properly detect hot trap sites.
1427       reprofile = true;
1428       break;
1429     case Action_make_not_entrant:
1430       // Request immediate recompilation, and get rid of the old code.
1431       // Make them not entrant, so next time they are called they get
1432       // recompiled.  Unloaded classes are loaded now so recompile before next
1433       // time they are called.  Same for uninitialized.  The interpreter will
1434       // link the missing class, if any.
1435       make_not_entrant = true;
1436       break;
1437     case Action_make_not_compilable:
1438       // Give up on compiling this method at all.
1439       make_not_entrant = true;
1440       make_not_compilable = true;
1441       break;
1442     default:
1443       ShouldNotReachHere();
1444     }
1445 
1446     // Setting +ProfileTraps fixes the following, on all platforms:
1447     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
1448     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1449     // recompile relies on a methodDataOop to record heroic opt failures.
1450 
1451     // Whether the interpreter is producing MDO data or not, we also need
1452     // to use the MDO to detect hot deoptimization points and control
1453     // aggressive optimization.
1454     bool inc_recompile_count = false;
1455     ProfileData* pdata = NULL;
1456     if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
1457       assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
1458       uint this_trap_count = 0;
1459       bool maybe_prior_trap = false;
1460       bool maybe_prior_recompile = false;
1461       pdata = query_update_method_data(trap_mdo, trap_bci, reason,
1462                                    //outputs:
1463                                    this_trap_count,
1464                                    maybe_prior_trap,
1465                                    maybe_prior_recompile);
1466       // Because the interpreter also counts null, div0, range, and class
1467       // checks, these traps from compiled code are double-counted.
1468       // This is harmless; it just means that the PerXTrapLimit values
1469       // are in effect a little smaller than they look.
1470 
1471       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1472       if (per_bc_reason != Reason_none) {
1473         // Now take action based on the partially known per-BCI history.
1474         if (maybe_prior_trap
1475             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1476           // If there are too many traps at this BCI, force a recompile.
1477           // This will allow the compiler to see the limit overflow, and
1478           // take corrective action, if possible.  The compiler generally
1479           // does not use the exact PerBytecodeTrapLimit value, but instead
1480           // changes its tactics if it sees any traps at all.  This provides
1481           // a little hysteresis, delaying a recompile until a trap happens
1482           // several times.
1483           //
1484           // Actually, since there is only one bit of counter per BCI,
1485           // the possible per-BCI counts are {0,1,(per-method count)}.
1486           // This produces accurate results if in fact there is only
1487           // one hot trap site, but begins to get fuzzy if there are
1488           // many sites.  For example, if there are ten sites each
1489           // trapping two or more times, they each get the blame for
1490           // all of their traps.
1491           make_not_entrant = true;
1492         }
1493 
1494         // Detect repeated recompilation at the same BCI, and enforce a limit.
1495         if (make_not_entrant && maybe_prior_recompile) {
1496           // More than one recompile at this point.
1497           inc_recompile_count = maybe_prior_trap;
1498         }
1499       } else {
1500         // For reasons which are not recorded per-bytecode, we simply
1501         // force recompiles unconditionally.
1502         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
1503         make_not_entrant = true;
1504       }
1505 
1506       // Go back to the compiler if there are too many traps in this method.
1507       if (this_trap_count >= (uint)PerMethodTrapLimit) {
1508         // If there are too many traps in this method, force a recompile.
1509         // This will allow the compiler to see the limit overflow, and
1510         // take corrective action, if possible.
1511         // (This condition is an unlikely backstop only, because the
1512         // PerBytecodeTrapLimit is more likely to take effect first,
1513         // if it is applicable.)
1514         make_not_entrant = true;
1515       }
1516 
1517       // Here's more hysteresis:  If there has been a recompile at
1518       // this trap point already, run the method in the interpreter
1519       // for a while to exercise it more thoroughly.
1520       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
1521         reprofile = true;
1522       }
1523 
1524     }
1525 
1526     // Take requested actions on the method:
1527 
1528     // Recompile
1529     if (make_not_entrant) {
1530       if (!nm->make_not_entrant()) {
        return; // the call did not change the nmethod's state
1532       }
1533 
1534       if (pdata != NULL) {
1535         // Record the recompilation event, if any.
1536         int tstate0 = pdata->trap_state();
1537         int tstate1 = trap_state_set_recompiled(tstate0, true);
1538         if (tstate1 != tstate0)
1539           pdata->set_trap_state(tstate1);
1540       }
1541     }
1542 
1543     if (inc_recompile_count) {
1544       trap_mdo->inc_overflow_recompile_count();
1545       if ((uint)trap_mdo->overflow_recompile_count() >
1546           (uint)PerBytecodeRecompilationCutoff) {
1547         // Give up on the method containing the bad BCI.
1548         if (trap_method() == nm->method()) {
1549           make_not_compilable = true;
1550         } else {
1551           trap_method->set_not_compilable(CompLevel_full_optimization);
1552           // But give grace to the enclosing nm->method().
1553         }
1554       }
1555     }
1556 
1557     // Reprofile
1558     if (reprofile) {
1559       CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
1560     }
1561 
1562     // Give up compiling
1563     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
1564       assert(make_not_entrant, "consistent");
1565       nm->method()->set_not_compilable(CompLevel_full_optimization);
1566     }
1567 
1568   } // Free marked resources
1569 
1570 }
1571 JRT_END
1572 
1573 methodDataOop
1574 Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
1575                                 bool create_if_missing) {
1576   Thread* THREAD = thread;
1577   methodDataOop mdo = m()->method_data();
1578   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1579     // Build an MDO.  Ignore errors like OutOfMemory;
1580     // that simply means we won't have an MDO to update.
1581     methodOopDesc::build_interpreter_method_data(m, THREAD);
1582     if (HAS_PENDING_EXCEPTION) {
1583       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1584       CLEAR_PENDING_EXCEPTION;
1585     }
1586     mdo = m()->method_data();
1587   }
1588   return mdo;
1589 }
1590 
1591 ProfileData*
1592 Deoptimization::query_update_method_data(methodDataHandle trap_mdo,
1593                                          int trap_bci,
1594                                          Deoptimization::DeoptReason reason,
1595                                          //outputs:
1596                                          uint& ret_this_trap_count,
1597                                          bool& ret_maybe_prior_trap,
1598                                          bool& ret_maybe_prior_recompile) {
1599   uint prior_trap_count = trap_mdo->trap_count(reason);
1600   uint this_trap_count  = trap_mdo->inc_trap_count(reason);
1601 
  // If the runtime cannot find a place to store trap history,
  // the history is estimated from the general condition of the method.
  // If the method has ever been recompiled, or has ever incurred
  // a trap with the present reason, then this BCI is assumed
  // (pessimistically) to be the culprit.
1607   bool maybe_prior_trap      = (prior_trap_count != 0);
1608   bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
1609   ProfileData* pdata = NULL;
1610 
1611 
1612   // For reasons which are recorded per bytecode, we check per-BCI data.
1613   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1614   if (per_bc_reason != Reason_none) {
1615     // Find the profile data for this BCI.  If there isn't one,
1616     // try to allocate one from the MDO's set of spares.
1617     // This will let us detect a repeated trap at this point.
1618     pdata = trap_mdo->allocate_bci_to_data(trap_bci);
1619 
1620     if (pdata != NULL) {
1621       // Query the trap state of this profile datum.
1622       int tstate0 = pdata->trap_state();
1623       if (!trap_state_has_reason(tstate0, per_bc_reason))
1624         maybe_prior_trap = false;
1625       if (!trap_state_is_recompiled(tstate0))
1626         maybe_prior_recompile = false;
1627 
1628       // Update the trap state of this profile datum.
1629       int tstate1 = tstate0;
1630       // Record the reason.
1631       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
1632       // Store the updated state on the MDO, for next time.
1633       if (tstate1 != tstate0)
1634         pdata->set_trap_state(tstate1);
1635     } else {
1636       if (LogCompilation && xtty != NULL) {
1637         ttyLocker ttyl;
1638         // Missing MDP?  Leave a small complaint in the log.
1639         xtty->elem("missing_mdp bci='%d'", trap_bci);
1640       }
1641     }
1642   }
1643 
1644   // Return results:
1645   ret_this_trap_count = this_trap_count;
1646   ret_maybe_prior_trap = maybe_prior_trap;
1647   ret_maybe_prior_recompile = maybe_prior_recompile;
1648   return pdata;
1649 }
1650 
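// Record a trap-like event observed while the method runs in the
// interpreter (see the reprofile notes in uncommon_trap_inner above),
// so that the next compilation sees an accurate per-BCI trap history.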
1651 void
1652 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
1653   ResourceMark rm;
1654   // Ignored outputs:
1655   uint ignore_this_trap_count;
1656   bool ignore_maybe_prior_trap;
1657   bool ignore_maybe_prior_recompile;
1658   query_update_method_data(trap_mdo, trap_bci,
1659                            (DeoptReason)reason,
1660                            ignore_this_trap_count,
1661                            ignore_maybe_prior_trap,
1662                            ignore_maybe_prior_recompile);
1663 }
1664 
1665 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
1666 
  // Still in Java, no safepoints have occurred yet.
  {
    // This call enters the VM and may safepoint.
    uncommon_trap_inner(thread, trap_request);
  }
1672   return fetch_unroll_info_helper(thread);
1673 }
1674 
1675 // Local derived constants.
1676 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
1677 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
1678 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
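//
// For illustration, assuming a 4-bit trap_state (one recompile bit plus
// three reason bits; the authoritative width is DataLayout::trap_bits):
//   trap_mask        = 0b1111
//   DS_REASON_MASK   = 0b0111   (low bits hold a DeoptReason, or
//                                DS_REASON_MASK itself for Reason_many)
//   DS_RECOMPILE_BIT = 0b1000   (set once a recompile has been ordered here)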
1679 
1680 //---------------------------trap_state_reason---------------------------------
1681 Deoptimization::DeoptReason
1682 Deoptimization::trap_state_reason(int trap_state) {
1683   // This assert provides the link between the width of DataLayout::trap_bits
1684   // and the encoding of "recorded" reasons.  It ensures there are enough
1685   // bits to store all needed reasons in the per-BCI MDO profile.
1686   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
1687   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1688   trap_state -= recompile_bit;
1689   if (trap_state == DS_REASON_MASK) {
1690     return Reason_many;
1691   } else {
1692     assert((int)Reason_none == 0, "state=0 => Reason_none");
1693     return (DeoptReason)trap_state;
1694   }
1695 }
1696 //-------------------------trap_state_has_reason-------------------------------
1697 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
1698   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
1699   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
1700   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1701   trap_state -= recompile_bit;
1702   if (trap_state == DS_REASON_MASK) {
1703     return -1;  // true, unspecifically (bottom of state lattice)
1704   } else if (trap_state == reason) {
1705     return 1;   // true, definitely
1706   } else if (trap_state == 0) {
1707     return 0;   // false, definitely (top of state lattice)
1708   } else {
1709     return 0;   // false, definitely
1710   }
1711 }
1712 //-------------------------trap_state_add_reason-------------------------------
1713 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
1714   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
1715   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1716   trap_state -= recompile_bit;
1717   if (trap_state == DS_REASON_MASK) {
1718     return trap_state + recompile_bit;     // already at state lattice bottom
1719   } else if (trap_state == reason) {
1720     return trap_state + recompile_bit;     // the condition is already true
1721   } else if (trap_state == 0) {
1722     return reason + recompile_bit;          // no condition has yet been true
1723   } else {
1724     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
1725   }
1726 }
1727 //-----------------------trap_state_is_recompiled------------------------------
1728 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
1729   return (trap_state & DS_RECOMPILE_BIT) != 0;
1730 }
1731 //-----------------------trap_state_set_recompiled-----------------------------
1732 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
1733   if (z)  return trap_state |  DS_RECOMPILE_BIT;
1734   else    return trap_state & ~DS_RECOMPILE_BIT;
1735 }
1736 //---------------------------format_trap_state---------------------------------
1737 // This is used for debugging and diagnostics, including hotspot.log output.
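// Example outputs: "null_check", "class_check recompiled", or "#10" for
// a state value that does not re-encode cleanly.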
1738 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
1739                                               int trap_state) {
1740   DeoptReason reason      = trap_state_reason(trap_state);
1741   bool        recomp_flag = trap_state_is_recompiled(trap_state);
1742   // Re-encode the state from its decoded components.
1743   int decoded_state = 0;
1744   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
1745     decoded_state = trap_state_add_reason(decoded_state, reason);
1746   if (recomp_flag)
1747     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
1748   // If the state re-encodes properly, format it symbolically.
1749   // Because this routine is used for debugging and diagnostics,
1750   // be robust even if the state is a strange value.
1751   size_t len;
1752   if (decoded_state != trap_state) {
1753     // Random buggy state that doesn't decode??
1754     len = jio_snprintf(buf, buflen, "#%d", trap_state);
1755   } else {
1756     len = jio_snprintf(buf, buflen, "%s%s",
1757                        trap_reason_name(reason),
1758                        recomp_flag ? " recompiled" : "");
1759   }
1760   if (len >= buflen)
1761     buf[buflen-1] = '\0';
1762   return buf;
1763 }
1764 
1765 
1766 //--------------------------------statics--------------------------------------
1767 Deoptimization::DeoptAction Deoptimization::_unloaded_action
1768   = Deoptimization::Action_reinterpret;
1769 const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
1770   // Note:  Keep this in sync. with enum DeoptReason.
1771   "none",
1772   "null_check",
1773   "null_assert",
1774   "range_check",
1775   "class_check",
1776   "array_check",
1777   "intrinsic",
1778   "bimorphic",
1779   "unloaded",
1780   "uninitialized",
1781   "unreached",
1782   "unhandled",
1783   "constraint",
1784   "div0_check",
1785   "age",
1786   "predicate",
1787   "loop_limit_check"
1788 };
1789 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
1790   // Note:  Keep this in sync. with enum DeoptAction.
1791   "none",
1792   "maybe_recompile",
1793   "reinterpret",
1794   "make_not_entrant",
1795   "make_not_compilable"
1796 };
1797 
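// Note: for out-of-range values, the two lookups below fall back to a
// shared static buffer; that path is diagnostic-only and not MT-safe.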
1798 const char* Deoptimization::trap_reason_name(int reason) {
1799   if (reason == Reason_many)  return "many";
1800   if ((uint)reason < Reason_LIMIT)
1801     return _trap_reason_name[reason];
1802   static char buf[20];
1803   sprintf(buf, "reason%d", reason);
1804   return buf;
1805 }
1806 const char* Deoptimization::trap_action_name(int action) {
1807   if ((uint)action < Action_LIMIT)
1808     return _trap_action_name[action];
1809   static char buf[20];
1810   sprintf(buf, "action%d", action);
1811   return buf;
1812 }
1813 
1814 // This is used for debugging and diagnostics, including hotspot.log output.
1815 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
1816                                                 int trap_request) {
1817   jint unloaded_class_index = trap_request_index(trap_request);
1818   const char* reason = trap_reason_name(trap_request_reason(trap_request));
1819   const char* action = trap_action_name(trap_request_action(trap_request));
1820   size_t len;
1821   if (unloaded_class_index < 0) {
1822     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
1823                        reason, action);
1824   } else {
1825     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
1826                        reason, action, unloaded_class_index);
1827   }
1828   if (len >= buflen)
1829     buf[buflen-1] = '\0';
1830   return buf;
1831 }
1832 
1833 juint Deoptimization::_deoptimization_hist
1834         [Deoptimization::Reason_LIMIT]
1835     [1 + Deoptimization::Action_LIMIT]
1836         [Deoptimization::BC_CASE_LIMIT]
1837   = {0};
1838 
1839 enum {
1840   LSB_BITS = 8,
1841   LSB_MASK = right_n_bits(LSB_BITS)
1842 };
1843 
1844 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
1845                                        Bytecodes::Code bc) {
1846   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
1847   assert(action >= 0 && action < Action_LIMIT, "oob");
1848   _deoptimization_hist[Reason_none][0][0] += 1;  // total
1849   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
1850   juint* cases = _deoptimization_hist[reason][1+action];
1851   juint* bc_counter_addr = NULL;
1852   juint  bc_counter      = 0;
1853   // Look for an unused counter, or an exact match to this BC.
1854   if (bc != Bytecodes::_illegal) {
1855     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
1856       juint* counter_addr = &cases[bc_case];
1857       juint  counter = *counter_addr;
1858       if ((counter == 0 && bc_counter_addr == NULL)
1859           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
1860         // this counter is either free or is already devoted to this BC
1861         bc_counter_addr = counter_addr;
1862         bc_counter = counter | bc;
1863       }
1864     }
1865   }
1866   if (bc_counter_addr == NULL) {
1867     // Overflow, or no given bytecode.
1868     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
1869     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
1870   }
1871   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
1872 }
1873 
1874 jint Deoptimization::total_deoptimization_count() {
1875   return _deoptimization_hist[Reason_none][0][0];
1876 }
1877 
1878 jint Deoptimization::deoptimization_count(DeoptReason reason) {
1879   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
1880   return _deoptimization_hist[reason][0][0];
1881 }
1882 
1883 void Deoptimization::print_statistics() {
1884   juint total = total_deoptimization_count();
1885   juint account = total;
1886   if (total != 0) {
1887     ttyLocker ttyl;
1888     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
1889     tty->print_cr("Deoptimization traps recorded:");
1890     #define PRINT_STAT_LINE(name, r) \
1891       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
1892     PRINT_STAT_LINE("total", total);
1893     // For each non-zero entry in the histogram, print the reason,
1894     // the action, and (if specifically known) the type of bytecode.
1895     for (int reason = 0; reason < Reason_LIMIT; reason++) {
1896       for (int action = 0; action < Action_LIMIT; action++) {
1897         juint* cases = _deoptimization_hist[reason][1+action];
1898         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
1899           juint counter = cases[bc_case];
1900           if (counter != 0) {
1901             char name[1*K];
1902             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
            // The last bucket with a zero bytecode is the overflow bucket
            // (see gather_statistics); report it as "other".
            if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
              bc = Bytecodes::_illegal;
1905             sprintf(name, "%s/%s/%s",
1906                     trap_reason_name(reason),
1907                     trap_action_name(action),
1908                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
1909             juint r = counter >> LSB_BITS;
1910             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
1911             account -= r;
1912           }
1913         }
1914       }
1915     }
1916     if (account != 0) {
1917       PRINT_STAT_LINE("unaccounted", account);
1918     }
1919     #undef PRINT_STAT_LINE
1920     if (xtty != NULL)  xtty->tail("statistics");
1921   }
1922 }
1923 #else // COMPILER2 || SHARK
1924 
1925 
// Stubs for a C1-only system.
1927 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
1928   return false;
1929 }
1930 
1931 const char* Deoptimization::trap_reason_name(int reason) {
1932   return "unknown";
1933 }
1934 
1935 void Deoptimization::print_statistics() {
1936   // no output
1937 }
1938 
1939 void
1940 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  // no update
1942 }
1943 
1944 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
1945   return 0;
1946 }
1947 
1948 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
1949                                        Bytecodes::Code bc) {
1950   // no update
1951 }
1952 
1953 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
1954                                               int trap_state) {
1955   jio_snprintf(buf, buflen, "#%d", trap_state);
1956   return buf;
1957 }
1958 
1959 #endif // COMPILER2 || SHARK