
src/share/vm/runtime/deoptimization.cpp


  51 #include "runtime/vframe.hpp"
  52 #include "runtime/vframeArray.hpp"
  53 #include "runtime/vframe_hp.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/xmlstream.hpp"
  56 
  57 #if INCLUDE_JVMCI
  58 #include "jvmci/jvmciRuntime.hpp"
  59 #include "jvmci/jvmciJavaClasses.hpp"
  60 #endif
  61 
  62 
  63 bool DeoptimizationMarker::_is_active = false;
  64 
  65 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
  66                                          int  caller_adjustment,
  67                                          int  caller_actual_parameters,
  68                                          int  number_of_frames,
  69                                          intptr_t* frame_sizes,
  70                                          address* frame_pcs,
  71                                          BasicType return_type) {

  72   _size_of_deoptimized_frame = size_of_deoptimized_frame;
  73   _caller_adjustment         = caller_adjustment;
  74   _caller_actual_parameters  = caller_actual_parameters;
  75   _number_of_frames          = number_of_frames;
  76   _frame_sizes               = frame_sizes;
  77   _frame_pcs                 = frame_pcs;
  78   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  79   _return_type               = return_type;
  80   _initial_info              = 0;
  81   // PD (x86 only)
  82   _counter_temp              = 0;
  83   _unpack_kind               = 0;
  84   _sender_sp_temp            = 0;
  85 
  86   _total_frame_sizes         = size_of_frames();

  87 }
  88 
  89 
  90 Deoptimization::UnrollBlock::~UnrollBlock() {
  91   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  92   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  93   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
  94 }
  95 
  96 
  97 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  98   assert(register_number < RegisterMap::reg_count, "checking register number");
  99   return &_register_block[register_number * 2];
 100 }
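
Each register gets two intptr_t slots in _register_block (the constructor allocates RegisterMap::reg_count * 2 entries), which is why value_addr_at() scales the register number by 2. A standalone model of that layout, under the assumption that the double-width slot exists to hold two-word register values:

    // Standalone model of the register block layout (illustrative only; the
    // two-words-per-register rationale is an assumption, and REG_COUNT is a
    // stand-in for RegisterMap::reg_count).
    #include <cstdint>

    const int REG_COUNT = 16;
    static intptr_t register_block[REG_COUNT * 2];   // models _register_block

    static intptr_t* model_value_addr_at(int register_number) {
      // mirrors UnrollBlock::value_addr_at(): one two-word slot per register
      return &register_block[register_number * 2];
    }
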
 101 
 102 
 103 
 104 int Deoptimization::UnrollBlock::size_of_frames() const {
 105   // Account first for the adjustment of the initial frame
 106   int result = _caller_adjustment;


 111 }
 112 
 113 
 114 void Deoptimization::UnrollBlock::print() {
 115   ttyLocker ttyl;
 116   tty->print_cr("UnrollBlock");
 117   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 118   tty->print(   "  frame_sizes: ");
 119   for (int index = 0; index < number_of_frames(); index++) {
 120     tty->print(INTX_FORMAT " ", frame_sizes()[index]);
 121   }
 122   tty->cr();
 123 }
 124 
 125 
 126 // In order to make fetch_unroll_info work properly with escape
 127 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
 128 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 129 // of previously eliminated objects occurs in realloc_objects, which is
 130 // called from the method fetch_unroll_info_helper below.
 131 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
 132   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 133   // but makes the entry a little slower. There is however a little dance we have to
 134   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 135 
 136   // fetch_unroll_info() is called at the beginning of the deoptimization
 137   // handler. Note this fact before we start generating temporary frames
 138   // that can confuse an asynchronous stack walker. This counter is
 139   // decremented at the end of unpack_frames().
 140   if (TraceDeoptimization) {
 141     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 142   }
 143   thread->inc_in_deopt_handler();
 144 
 145   return fetch_unroll_info_helper(thread);
 146 JRT_END
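
As the comment above notes, the increment here is balanced by a decrement at the end of unpack_frames(). A schematic of that bracket (dec_in_deopt_handler() is assumed to be the matching JavaThread accessor; it does not appear in this excerpt):

    // Schematic of the deopt-handler counter bracket (paraphrase, not a
    // verbatim excerpt; dec_in_deopt_handler() is assumed by symmetry).
    //
    //   fetch_unroll_info(thread):         // deopt handler entry
    //     thread->inc_in_deopt_handler();  // temporary frames may follow;
    //                                      // async stack walkers take note
    //     return fetch_unroll_info_helper(thread);
    //
    //   unpack_frames(thread, exec_mode):  // deopt handler exit
    //     ... materialize the interpreter frames ...
    //     thread->dec_in_deopt_handler();  // balances the increment above
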
 147 
 148 
 149 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 150 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {
 151 
 152   // Note: there is a safepoint safety issue here. No matter whether we enter
 153   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 154   // the vframeArray is created.
 155   //
 156 
 157   // Allocate our special deoptimization ResourceMark
 158   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 159   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 160   thread->set_deopt_mark(dmark);
 161 
 162   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 163   RegisterMap map(thread, true);
 164   RegisterMap dummy_map(thread, false);
 165   // Now get the deoptee with a valid map
 166   frame deoptee = stub_frame.sender(&map);
 167   // Set the deoptee nmethod
 168   assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
 169   thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
 170   bool skip_internal = thread->deopt_nmethod() != NULL && !thread->deopt_nmethod()->compiler()->is_jvmci();
 171 
 172   if (VerifyStack) {
 173     thread->validate_frame_layout();
 174   }
 175 
 176   // Create a growable array of VFrames where each VFrame represents an inlined
 177   // Java frame.  This storage is allocated with the usual system arena.
 178   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 179   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 180   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
 181   while (!vf->is_top()) {
 182     assert(vf->is_compiled_frame(), "Wrong frame type");
 183     chunk->push(compiledVFrame::cast(vf));
 184     vf = vf->sender();
 185   }
 186   assert(vf->is_compiled_frame(), "Wrong frame type");
 187   chunk->push(compiledVFrame::cast(vf));
 188 
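
The loop above flattens the one physical compiled frame into its chain of inlined Java frames, youngest first; is_top() is true for the last vframe belonging to this physical frame, which is pushed after the loop. A miniature model of the traversal (MiniVFrame is an illustrative stand-in, not a HotSpot type):

    // Miniature model of the vframe walk above (illustrative types only).
    #include <vector>

    struct MiniVFrame {
      bool is_top;             // last vframe of this physical frame?
      MiniVFrame* sender;      // next vframe outward
    };

    static std::vector<MiniVFrame*> collect_chunk(MiniVFrame* vf) {
      std::vector<MiniVFrame*> chunk;
      while (!vf->is_top) {    // mirrors: while (!vf->is_top())
        chunk.push_back(vf);
        vf = vf->sender;
      }
      chunk.push_back(vf);     // the top vframe itself, like the final push above
      return chunk;            // chunk[0] is the youngest (deepest inlined) frame
    }
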













 189   bool realloc_failures = false;
 190 
 191 #if defined(COMPILER2) || INCLUDE_JVMCI
 192   // Reallocate the non-escaping objects and restore their fields. Then
 193   // relock objects if synchronization on them was eliminated.
 194 #ifndef INCLUDE_JVMCI
 195   if (DoEscapeAnalysis || EliminateNestedLocks) {
 196     if (EliminateAllocations) {
 197 #endif // INCLUDE_JVMCI
 198       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 199       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 200 
 201       // The flag return_oop() marks call sites in compiled code which
 202       // return an oop. Such sites include Java method calls, runtime
 203       // calls (for example, those used to allocate new objects/arrays
 204       // on the slow path) and any other calls generated in compiled code.
 205       // This information cannot reliably be recovered here just by
 206       // analyzing the bytecode of the deoptimized frames, which is why
 207       // the flag is set during method compilation (see Compile::Process_OopMap_Node()).
 208       // If the previous frame was popped, we don't have a result.


 457 
 458   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 459     caller_adjustment = last_frame_adjust(0, callee_locals);
 460   } else if (callee_locals > callee_parameters) {
 461     // The caller frame may need extending to accommodate
 462     // non-parameter locals of the first unpacked interpreted frame.
 463     // Compute that adjustment.
 464     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 465   }
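
A worked example of the two branches above, under the simplifying assumption that last_frame_adjust() returns the slot difference (the real helper is platform-defined):

    // Illustrative model of last_frame_adjust() on a platform where it is
    // just the slot difference (an assumption; the real helper is per-CPU).
    static int simplified_last_frame_adjust(int callee_parameters, int callee_locals) {
      return callee_locals - callee_parameters;  // extra slots the caller provides
    }
    // E.g. callee_parameters = 2, callee_locals = 5:
    //   interpreted caller: simplified_last_frame_adjust(2, 5) == 3
    //     (only the non-parameter locals are missing from the caller frame)
    //   compiled caller / method handle: simplified_last_frame_adjust(0, 5) == 5
    //     (no argument slots can be reused, so the whole locals area is added)
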
 466 
 467   // If the sender is deoptimized, we must retrieve the address of the handler
 468   // since the frame will "magically" show the original pc before the deopt
 469   // and we'd undo the deopt.
 470 
 471   frame_pcs[0] = deopt_sender.raw_pc();
 472 
 473 #ifndef SHARK
 474   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 475 #endif // SHARK
 476 







 477   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 478                                       caller_adjustment * BytesPerWord,
 479                                       caller_was_method_handle ? 0 : callee_parameters,
 480                                       number_of_frames,
 481                                       frame_sizes,
 482                                       frame_pcs,
 483                                       return_type);

 484   // On some platforms, we need a way to pass some platform dependent
 485   // information to the unpacking code so the skeletal frames come out
 486   // correct (initial fp value, unextended sp, ...)
 487   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 488 
 489   if (array->frames() > 1) {
 490     if (VerifyStack && TraceDeoptimization) {
 491       ttyLocker ttyl;
 492       tty->print_cr("Deoptimizing method containing inlining");
 493     }
 494   }
 495 
 496   array->set_unroll_block(info);
 497   return info;
 498 }
 499 
 500 // Called to clean up deoptimization data structures, both in the normal
 501 // case after unpacking to the stack and when a stack overflow error occurs.
 502 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 503                                         vframeArray *array) {


1478       } else {
1479         if (TraceDeoptimization) {
1480           tty->print_cr("No speculation");
1481         }
1482       }
1483     } else {
1484       assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1485     }
1486 
1487     if (trap_bci == SynchronizationEntryBCI) {
1488       trap_bci = 0;
1489       thread->set_pending_monitorenter(true);
1490     }
1491 
1492     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1493       thread->set_pending_transfer_to_interpreter(true);
1494     }
1495 #endif
1496 
1497     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1498 
1499     if (trap_scope->rethrow_exception()) {
1500       if (PrintDeoptimizationDetails) {
1501         tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_method->method_holder()->name()->as_C_string(), trap_method->name()->as_C_string(), trap_bci);
1502       }
1503       GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
1504       guarantee(expressions != NULL, "must have exception to throw");
1505       ScopeValue* topOfStack = expressions->top();
1506       Handle topOfStackObj = StackValue::create_stack_value(&fr, &reg_map, topOfStack)->get_obj();
1507       THREAD->set_pending_exception(topOfStackObj(), NULL, 0);
1508     }
1509 
1510     // Record this event in the histogram.
1511     gather_statistics(reason, action, trap_bc);
1512 
1513     // Ensure that we can record deopt. history:
1514     // Need MDO to record RTM code generation state.
1515     bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1516 
1517     methodHandle profiled_method;
1518 #if INCLUDE_JVMCI
1519     if (nm->is_compiled_by_jvmci()) {
1520       profiled_method = nm->method();
1521     } else {
1522       profiled_method = trap_method;
1523     }
1524 #else
1525     profiled_method = trap_method;
1526 #endif
1527 
1528     MethodData* trap_mdo =
1529       get_method_data(thread, profiled_method, create_if_missing);


1968   ResourceMark rm;
1969   // Ignored outputs:
1970   uint ignore_this_trap_count;
1971   bool ignore_maybe_prior_trap;
1972   bool ignore_maybe_prior_recompile;
1973   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
1974   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
1975   bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true);
1976   query_update_method_data(trap_mdo, trap_bci,
1977                            (DeoptReason)reason,
1978                            update_total_counts,
1979 #if INCLUDE_JVMCI
1980                            false,
1981 #endif
1982                            NULL,
1983                            ignore_this_trap_count,
1984                            ignore_maybe_prior_trap,
1985                            ignore_maybe_prior_recompile);
1986 }
1987 
1988 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
1989   if (TraceDeoptimization) {
1990     tty->print("Uncommon trap ");
1991   }
1992   // Still in Java, no safepoints
1993   {
1994     // This enters VM and may safepoint
1995     uncommon_trap_inner(thread, trap_request);
1996   }
1997   return fetch_unroll_info_helper(thread);
1998 }
1999 
2000 // Local derived constants.
2001 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2002 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
2003 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
2004 
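
To make the derived constants concrete: if DataLayout::trap_mask were 0xFF (a value assumed purely for illustration), DS_REASON_MASK would be 0x7F and DS_RECOMPILE_BIT the remaining top bit, 0x80. A self-contained sketch of the decode that trap_state_reason() performs below:

    // Self-contained sketch of the trap_state decode; 0xFF is an assumed
    // stand-in for DataLayout::trap_mask.
    const int ILLUSTRATIVE_TRAP_MASK = 0xFF;
    const int REASON_MASK   = ILLUSTRATIVE_TRAP_MASK >> 1;           // 0x7F
    const int RECOMPILE_BIT = ILLUSTRATIVE_TRAP_MASK - REASON_MASK;  // 0x80

    static int decode_reason_bits(int trap_state) {
      int recompile_bit = trap_state & RECOMPILE_BIT;  // peel off the recompile flag
      trap_state -= recompile_bit;
      return trap_state;  // REASON_MASK encodes Reason_many; 0 encodes Reason_none
    }
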
2005 //---------------------------trap_state_reason---------------------------------
2006 Deoptimization::DeoptReason
2007 Deoptimization::trap_state_reason(int trap_state) {
2008   // This assert provides the link between the width of DataLayout::trap_bits
2009   // and the encoding of "recorded" reasons.  It ensures there are enough
2010   // bits to store all needed reasons in the per-BCI MDO profile.
2011   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2012   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2013   trap_state -= recompile_bit;
2014   if (trap_state == DS_REASON_MASK) {
2015     return Reason_many;
2016   } else {
2017     assert((int)Reason_none == 0, "state=0 => Reason_none");




  51 #include "runtime/vframe.hpp"
  52 #include "runtime/vframeArray.hpp"
  53 #include "runtime/vframe_hp.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/xmlstream.hpp"
  56 
  57 #if INCLUDE_JVMCI
  58 #include "jvmci/jvmciRuntime.hpp"
  59 #include "jvmci/jvmciJavaClasses.hpp"
  60 #endif
  61 
  62 
  63 bool DeoptimizationMarker::_is_active = false;
  64 
  65 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
  66                                          int  caller_adjustment,
  67                                          int  caller_actual_parameters,
  68                                          int  number_of_frames,
  69                                          intptr_t* frame_sizes,
  70                                          address* frame_pcs,
  71                                          BasicType return_type,
  72                                          int exec_mode) {
  73   _size_of_deoptimized_frame = size_of_deoptimized_frame;
  74   _caller_adjustment         = caller_adjustment;
  75   _caller_actual_parameters  = caller_actual_parameters;
  76   _number_of_frames          = number_of_frames;
  77   _frame_sizes               = frame_sizes;
  78   _frame_pcs                 = frame_pcs;
  79   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  80   _return_type               = return_type;
  81   _initial_info              = 0;
  82   // PD (x86 only)
  83   _counter_temp              = 0;
  84   _unpack_kind               = exec_mode;
  85   _sender_sp_temp            = 0;
  86 
  87   _total_frame_sizes         = size_of_frames();
  88   assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
  89 }
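
The new exec_mode parameter lands in _unpack_kind and is range-checked against Unpack_LIMIT. A schematic of how a caller supplies it (of the Unpack_* constants, only Unpack_exception and Unpack_LIMIT appear in this diff; Unpack_deopt below is assumed from the surrounding sources):

    // Schematic only; Unpack_deopt is assumed from deoptimization.hpp and
    // does not appear in this diff.
    //
    //   int exec_mode = Unpack_deopt;      // ordinary deoptimization
    //   if (/* a pending exception must be rethrown */) {
    //     exec_mode = Unpack_exception;    // see fetch_unroll_info_helper below
    //   }
    //   UnrollBlock* info = new UnrollBlock(frame_bytes, adjust_bytes,
    //                                       caller_actual_parameters,
    //                                       number_of_frames, frame_sizes,
    //                                       frame_pcs, return_type,
    //                                       exec_mode);  // stored in _unpack_kind
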
  90 
  91 
  92 Deoptimization::UnrollBlock::~UnrollBlock() {
  93   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  94   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  95   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
  96 }
  97 
  98 
  99 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
 100   assert(register_number < RegisterMap::reg_count, "checking register number");
 101   return &_register_block[register_number * 2];
 102 }
 103 
 104 
 105 
 106 int Deoptimization::UnrollBlock::size_of_frames() const {
 107   // Account first for the adjustment of the initial frame
 108   int result = _caller_adjustment;


 113 }
 114 
 115 
 116 void Deoptimization::UnrollBlock::print() {
 117   ttyLocker ttyl;
 118   tty->print_cr("UnrollBlock");
 119   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 120   tty->print(   "  frame_sizes: ");
 121   for (int index = 0; index < number_of_frames(); index++) {
 122     tty->print(INTX_FORMAT " ", frame_sizes()[index]);
 123   }
 124   tty->cr();
 125 }
 126 
 127 
 128 // In order to make fetch_unroll_info work properly with escape
 129 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
 130 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 131 // of previously eliminated objects occurs in realloc_objects, which is
 132 // called from the method fetch_unroll_info_helper below.
 133 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
 134   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 135   // but makes the entry a little slower. There is however a little dance we have to
 136   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 137 
 138   // fetch_unroll_info() is called at the beginning of the deoptimization
 139   // handler. Note this fact before we start generating temporary frames
 140   // that can confuse an asynchronous stack walker. This counter is
 141   // decremented at the end of unpack_frames().
 142   if (TraceDeoptimization) {
 143     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 144   }
 145   thread->inc_in_deopt_handler();
 146 
 147   return fetch_unroll_info_helper(thread, exec_mode);
 148 JRT_END
 149 
 150 
 151 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 152 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
 153 
 154   // Note: there is a safepoint safety issue here. No matter whether we enter
 155   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 156   // the vframeArray is created.
 157   //
 158 
 159   // Allocate our special deoptimization ResourceMark
 160   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 161   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 162   thread->set_deopt_mark(dmark);
 163 
 164   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 165   RegisterMap map(thread, true);
 166   RegisterMap dummy_map(thread, false);
 167   // Now get the deoptee with a valid map
 168   frame deoptee = stub_frame.sender(&map);
 169   // Set the deoptee nmethod
 170   assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
 171   thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
 172   bool skip_internal = thread->deopt_nmethod() != NULL && !thread->deopt_nmethod()->compiler()->is_jvmci();
 173 
 174   if (VerifyStack) {
 175     thread->validate_frame_layout();
 176   }
 177 
 178   // Create a growable array of VFrames where each VFrame represents an inlined
 179   // Java frame.  This storage is allocated with the usual system arena.
 180   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 181   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 182   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
 183   while (!vf->is_top()) {
 184     assert(vf->is_compiled_frame(), "Wrong frame type");
 185     chunk->push(compiledVFrame::cast(vf));
 186     vf = vf->sender();
 187   }
 188   assert(vf->is_compiled_frame(), "Wrong frame type");
 189   chunk->push(compiledVFrame::cast(vf));
 190 
 191   ScopeDesc* trap_scope = chunk->at(0)->scope();
 192   Handle exceptionObject;
 193   if (trap_scope->rethrow_exception()) {
 194     if (PrintDeoptimizationDetails) {
 195       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 196     }
 197     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 198     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 199     ScopeValue* topOfStack = expressions->top();
 200     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 201     assert(exceptionObject() != NULL, "exception oop can not be null");
 202   }
 203 
 204   bool realloc_failures = false;
 205 
 206 #if defined(COMPILER2) || INCLUDE_JVMCI
 207   // Reallocate the non-escaping objects and restore their fields. Then
 208   // relock objects if synchronization on them was eliminated.
 209 #ifndef INCLUDE_JVMCI
 210   if (DoEscapeAnalysis || EliminateNestedLocks) {
 211     if (EliminateAllocations) {
 212 #endif // INCLUDE_JVMCI
 213       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 214       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 215 
 216       // The flag return_oop() marks call sites in compiled code which
 217       // return an oop. Such sites include Java method calls, runtime
 218       // calls (for example, those used to allocate new objects/arrays
 219       // on the slow path) and any other calls generated in compiled code.
 220       // This information cannot reliably be recovered here just by
 221       // analyzing the bytecode of the deoptimized frames, which is why
 222       // the flag is set during method compilation (see Compile::Process_OopMap_Node()).
 223       // If the previous frame was popped, we don't have a result.


 472 
 473   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 474     caller_adjustment = last_frame_adjust(0, callee_locals);
 475   } else if (callee_locals > callee_parameters) {
 476     // The caller frame may need extending to accommodate
 477     // non-parameter locals of the first unpacked interpreted frame.
 478     // Compute that adjustment.
 479     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 480   }
 481 
 482   // If the sender is deoptimized, we must retrieve the address of the handler
 483   // since the frame will "magically" show the original pc before the deopt
 484   // and we'd undo the deopt.
 485 
 486   frame_pcs[0] = deopt_sender.raw_pc();
 487 
 488 #ifndef SHARK
 489   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 490 #endif // SHARK
 491 
 492 #if INCLUDE_JVMCI
 493   if (exceptionObject() != NULL) {
 494     thread->set_exception_oop(exceptionObject());
 495     exec_mode = Unpack_exception;
 496   }
 497 #endif
 498 
 499   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 500                                       caller_adjustment * BytesPerWord,
 501                                       caller_was_method_handle ? 0 : callee_parameters,
 502                                       number_of_frames,
 503                                       frame_sizes,
 504                                       frame_pcs,
 505                                       return_type,
 506                                       exec_mode);
 507   // On some platforms, we need a way to pass some platform dependent
 508   // information to the unpacking code so the skeletal frames come out
 509   // correct (initial fp value, unextended sp, ...)
 510   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 511 
 512   if (array->frames() > 1) {
 513     if (VerifyStack && TraceDeoptimization) {
 514       ttyLocker ttyl;
 515       tty->print_cr("Deoptimizing method containing inlining");
 516     }
 517   }
 518 
 519   array->set_unroll_block(info);
 520   return info;
 521 }
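
The rethrow case now threads through exec_mode end to end: the exception oop is captured early (lines 191-202 above, while the deoptee frame is still walkable) and, after the skeletal-frame bookkeeping, stashed on the thread with the mode forced to Unpack_exception (lines 492-497). Condensed control flow, paraphrased:

    // Condensed from fetch_unroll_info_helper above (paraphrase, not verbatim):
    //
    //   Handle exceptionObject;
    //   if (trap_scope->rethrow_exception()) {
    //     exceptionObject = <oop on top of the deoptee expression stack>;
    //   }
    //   ... compute frame sizes, pcs and the caller adjustment ...
    //   if (exceptionObject() != NULL) {
    //     thread->set_exception_oop(exceptionObject());
    //     exec_mode = Unpack_exception;    // unpack_frames will rethrow it
    //   }
    //   return new UnrollBlock(..., exec_mode);
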
 522 
 523 // Called to clean up deoptimization data structures, both in the normal
 524 // case after unpacking to the stack and when a stack overflow error occurs.
 525 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 526                                         vframeArray *array) {


1501       } else {
1502         if (TraceDeoptimization) {
1503           tty->print_cr("No speculation");
1504         }
1505       }
1506     } else {
1507       assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1508     }
1509 
1510     if (trap_bci == SynchronizationEntryBCI) {
1511       trap_bci = 0;
1512       thread->set_pending_monitorenter(true);
1513     }
1514 
1515     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1516       thread->set_pending_transfer_to_interpreter(true);
1517     }
1518 #endif
1519 
1520     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);












1521     // Record this event in the histogram.
1522     gather_statistics(reason, action, trap_bc);
1523 
1524     // Ensure that we can record deopt. history:
1525     // Need MDO to record RTM code generation state.
1526     bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1527 
1528     methodHandle profiled_method;
1529 #if INCLUDE_JVMCI
1530     if (nm->is_compiled_by_jvmci()) {
1531       profiled_method = nm->method();
1532     } else {
1533       profiled_method = trap_method;
1534     }
1535 #else
1536     profiled_method = trap_method;
1537 #endif
1538 
1539     MethodData* trap_mdo =
1540       get_method_data(thread, profiled_method, create_if_missing);


1979   ResourceMark rm;
1980   // Ignored outputs:
1981   uint ignore_this_trap_count;
1982   bool ignore_maybe_prior_trap;
1983   bool ignore_maybe_prior_recompile;
1984   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
1985   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
1986   bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true);
1987   query_update_method_data(trap_mdo, trap_bci,
1988                            (DeoptReason)reason,
1989                            update_total_counts,
1990 #if INCLUDE_JVMCI
1991                            false,
1992 #endif
1993                            NULL,
1994                            ignore_this_trap_count,
1995                            ignore_maybe_prior_trap,
1996                            ignore_maybe_prior_recompile);
1997 }
1998 
1999 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
2000   if (TraceDeoptimization) {
2001     tty->print("Uncommon trap ");
2002   }
2003   // Still in Java, no safepoints
2004   {
2005     // This enters VM and may safepoint
2006     uncommon_trap_inner(thread, trap_request);
2007   }
2008   return fetch_unroll_info_helper(thread, exec_mode);
2009 }
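
trap_request is a packed encoding of the deoptimization reason and action; the Deoptimization class provides accessors along the lines of trap_request_reason() and trap_request_action() to unpack it (names taken from the wider HotSpot sources, not from this diff). Sketch of the consumer side:

    // Sketch of unpacking trap_request (accessor names assumed from the
    // wider HotSpot sources; they are not shown in this diff):
    //
    //   DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
    //   DeoptAction action = Deoptimization::trap_request_action(trap_request);
    //   // uncommon_trap_inner() consults these to update profiling state before
    //   // fetch_unroll_info_helper() builds the UnrollBlock with exec_mode.
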
2010 
2011 // Local derived constants.
2012 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2013 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
2014 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
2015 
2016 //---------------------------trap_state_reason---------------------------------
2017 Deoptimization::DeoptReason
2018 Deoptimization::trap_state_reason(int trap_state) {
2019   // This assert provides the link between the width of DataLayout::trap_bits
2020   // and the encoding of "recorded" reasons.  It ensures there are enough
2021   // bits to store all needed reasons in the per-BCI MDO profile.
2022   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2023   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2024   trap_state -= recompile_bit;
2025   if (trap_state == DS_REASON_MASK) {
2026     return Reason_many;
2027   } else {
2028     assert((int)Reason_none == 0, "state=0 => Reason_none");

