237 do {
238 // Because of inlining we could have multiple vframes for a single frame
239 // and several of the vframes could have deferred writes. Find them all.
240 if (list->at(i)->id() == array->original().id()) {
241 jvmtiDeferredLocalVariableSet* dlv = list->at(i);
242 list->remove_at(i);
243 // individual jvmtiDeferredLocalVariableSet are CHeapObj's
244 delete dlv;
245 } else {
246 i++;
247 }
248 } while ( i < list->length() );
249 if (list->length() == 0) {
250 thread->set_deferred_locals(NULL);
251 // free the list and elements back to C heap.
252 delete list;
253 }
254
255 }
256
257 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
258 CodeBlob* cb = stub_frame.cb();
259 // Verify we have the right vframeArray
260 assert(cb->frame_size() >= 0, "Unexpected frame size");
261 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
262
263 // If the deopt call site is a MethodHandle invoke call site we have
264 // to adjust the unpack_sp.
265 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
266 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
267 unpack_sp = deoptee.unextended_sp();
268
269 #ifdef ASSERT
270 assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
271 Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
272 #endif
273 // This is a guarantee instead of an assert because if vframe doesn't match
274 // we will unpack the wrong deoptimized frame and wind up in strange places
275 // where it will be very difficult to figure out what went wrong. Better
276 // to die an early death here than some very obscure death later when the
277 // trail is cold.
278 // Note: on ia64 this guarantee can be fooled by frames with no memory stack
279 // in that it will fail to detect a problem when there is one. This needs
280 // more work in tiger timeframe.
281 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
282
283 int number_of_frames = array->frames();
284
285 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
286 // virtual activation, which is the reverse of the elements in the vframes array.
287 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
288 // +1 because we always have an interpreter return address for the final slot.
289 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
290 int callee_parameters = 0;
291 int callee_locals = 0;
292 int popframe_extra_args = 0;
363
364 // QQQ I'd rather see this pushed down into last_frame_adjust
365 // and have it take the sender (aka caller).
366
367 if (deopt_sender.is_compiled_frame()) {
368 caller_adjustment = last_frame_adjust(0, callee_locals);
369 } else if (callee_locals > callee_parameters) {
370 // The caller frame may need extending to accommodate
371 // non-parameter locals of the first unpacked interpreted frame.
372 // Compute that adjustment.
373 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
374 }
375
376
377 // If the sender is deoptimized the we must retrieve the address of the handler
378 // since the frame will "magically" show the original pc before the deopt
379 // and we'd undo the deopt.
380
381 frame_pcs[0] = deopt_sender.raw_pc();
382
383 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
384
385 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
386 caller_adjustment * BytesPerWord,
387 number_of_frames,
388 frame_sizes,
389 frame_pcs,
390 return_type);
391 #if defined(IA32) || defined(AMD64)
392 // We need a way to pass fp to the unpacking code so the skeletal frames
393 // come out correct. This is only needed for x86 because of c2 using ebp
394 // as an allocatable register. So this update is useless (and harmless)
395 // on the other platforms. It would be nice to do this in a different
396 // way but even the old style deoptimization had a problem with deriving
397 // this value. NEEDS_CLEANUP
398 // Note: now that c1 is using c2's deopt blob we must do this on all
399 // x86 based platforms
400 intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
401 *fp_addr = array->sender().fp(); // was adapter_caller
402 #endif /* IA32 || AMD64 */
403
1056
1057 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1058 // Compute frame and register map based on thread and sp.
1059 RegisterMap reg_map(thread, UseBiasedLocking);
1060 frame fr = thread->last_frame();
1061 while (fr.id() != id) {
1062 fr = fr.sender(®_map);
1063 }
1064 deoptimize(thread, fr, ®_map);
1065 }
1066
1067
// JVMTI PopFrame support
//
// JRT leaf entry: thin wrapper that forwards to
// JavaThread::popframe_preserve_args, converting the raw byte count into an
// in_ByteSize. Presumably stashes 'bytes_to_save' bytes starting at
// 'start_address' so the arguments survive the PopFrame operation —
// confirm against JavaThread::popframe_preserve_args.
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END
1074
1075
1076 #ifdef COMPILER2
1077 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
1078 // in case of an unresolved klass entry, load the class.
1079 if (constant_pool->tag_at(index).is_unresolved_klass()) {
1080 klassOop tk = constant_pool->klass_at(index, CHECK);
1081 return;
1082 }
1083
1084 if (!constant_pool->tag_at(index).is_symbol()) return;
1085
1086 Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
1087 symbolHandle symbol (THREAD, constant_pool->symbol_at(index));
1088
1089 // class name?
1090 if (symbol->byte_at(0) != '(') {
1091 Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
1092 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1093 return;
1094 }
1095
1096 // then it must be a signature!
1818 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
1819 bc = Bytecodes::_illegal;
1820 sprintf(name, "%s/%s/%s",
1821 trap_reason_name(reason),
1822 trap_action_name(action),
1823 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
1824 juint r = counter >> LSB_BITS;
1825 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
1826 account -= r;
1827 }
1828 }
1829 }
1830 }
1831 if (account != 0) {
1832 PRINT_STAT_LINE("unaccounted", account);
1833 }
1834 #undef PRINT_STAT_LINE
1835 if (xtty != NULL) xtty->tail("statistics");
1836 }
1837 }
1838 #else // COMPILER2
1839
1840
1841 // Stubs for C1 only system.
// C1-only stub: without COMPILER2 there is no per-method trap state,
// so nothing is ever reported as recompiled.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}
1845
// C1-only stub: trap reasons are a COMPILER2 concept; always reports
// the reason as "unknown".
const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}
1849
// C1-only stub: no deoptimization statistics are gathered in this
// configuration, so there is nothing to print.
void Deoptimization::print_statistics() {
  // no output
}
1853
// C1-only stub: there is no MDO trap accounting without COMPILER2, so the
// interpreter-triggered update is a no-op.
void
Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  // no update
}
1858
// C1-only stub: no trap state is recorded, so it returns 0 unconditionally
// (presumably meaning "reason not present" — the COMPILER2 version encodes
// more; confirm against the COMPILER2 implementation).
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}
1862
// C1-only stub: deoptimization statistics are not collected in this
// configuration, so the counters are never updated.
void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}
1867
// C1-only stub: without COMPILER2 there is no reason/action encoding to
// decode, so just print the raw trap_state value into the caller's buffer
// and return it.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}
1873
1874 #endif // COMPILER2
|
237 do {
238 // Because of inlining we could have multiple vframes for a single frame
239 // and several of the vframes could have deferred writes. Find them all.
240 if (list->at(i)->id() == array->original().id()) {
241 jvmtiDeferredLocalVariableSet* dlv = list->at(i);
242 list->remove_at(i);
243 // individual jvmtiDeferredLocalVariableSet are CHeapObj's
244 delete dlv;
245 } else {
246 i++;
247 }
248 } while ( i < list->length() );
249 if (list->length() == 0) {
250 thread->set_deferred_locals(NULL);
251 // free the list and elements back to C heap.
252 delete list;
253 }
254
255 }
256
257 #ifndef SHARK
258 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
259 CodeBlob* cb = stub_frame.cb();
260 // Verify we have the right vframeArray
261 assert(cb->frame_size() >= 0, "Unexpected frame size");
262 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
263
264 // If the deopt call site is a MethodHandle invoke call site we have
265 // to adjust the unpack_sp.
266 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
267 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
268 unpack_sp = deoptee.unextended_sp();
269
270 #ifdef ASSERT
271 assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
272 Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
273 #endif
274 #else
275 intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
276 #endif // !SHARK
277
278 // This is a guarantee instead of an assert because if vframe doesn't match
279 // we will unpack the wrong deoptimized frame and wind up in strange places
280 // where it will be very difficult to figure out what went wrong. Better
281 // to die an early death here than some very obscure death later when the
282 // trail is cold.
283 // Note: on ia64 this guarantee can be fooled by frames with no memory stack
284 // in that it will fail to detect a problem when there is one. This needs
285 // more work in tiger timeframe.
286 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
287
288 int number_of_frames = array->frames();
289
290 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
291 // virtual activation, which is the reverse of the elements in the vframes array.
292 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
293 // +1 because we always have an interpreter return address for the final slot.
294 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
295 int callee_parameters = 0;
296 int callee_locals = 0;
297 int popframe_extra_args = 0;
368
369 // QQQ I'd rather see this pushed down into last_frame_adjust
370 // and have it take the sender (aka caller).
371
372 if (deopt_sender.is_compiled_frame()) {
373 caller_adjustment = last_frame_adjust(0, callee_locals);
374 } else if (callee_locals > callee_parameters) {
375 // The caller frame may need extending to accommodate
376 // non-parameter locals of the first unpacked interpreted frame.
377 // Compute that adjustment.
378 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
379 }
380
381
382 // If the sender is deoptimized the we must retrieve the address of the handler
383 // since the frame will "magically" show the original pc before the deopt
384 // and we'd undo the deopt.
385
386 frame_pcs[0] = deopt_sender.raw_pc();
387
388 #ifndef SHARK
389 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
390 #endif // SHARK
391
392 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
393 caller_adjustment * BytesPerWord,
394 number_of_frames,
395 frame_sizes,
396 frame_pcs,
397 return_type);
398 #if defined(IA32) || defined(AMD64)
399 // We need a way to pass fp to the unpacking code so the skeletal frames
400 // come out correct. This is only needed for x86 because of c2 using ebp
401 // as an allocatable register. So this update is useless (and harmless)
402 // on the other platforms. It would be nice to do this in a different
403 // way but even the old style deoptimization had a problem with deriving
404 // this value. NEEDS_CLEANUP
405 // Note: now that c1 is using c2's deopt blob we must do this on all
406 // x86 based platforms
407 intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
408 *fp_addr = array->sender().fp(); // was adapter_caller
409 #endif /* IA32 || AMD64 */
410
1063
1064 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1065 // Compute frame and register map based on thread and sp.
1066 RegisterMap reg_map(thread, UseBiasedLocking);
1067 frame fr = thread->last_frame();
1068 while (fr.id() != id) {
1069 fr = fr.sender(®_map);
1070 }
1071 deoptimize(thread, fr, ®_map);
1072 }
1073
1074
// JVMTI PopFrame support
//
// JRT leaf entry: thin wrapper that forwards to
// JavaThread::popframe_preserve_args, converting the raw byte count into an
// in_ByteSize. Presumably stashes 'bytes_to_save' bytes starting at
// 'start_address' so the arguments survive the PopFrame operation —
// confirm against JavaThread::popframe_preserve_args.
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END
1081
1082
1083 #if defined(COMPILER2) || defined(SHARK)
1084 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
1085 // in case of an unresolved klass entry, load the class.
1086 if (constant_pool->tag_at(index).is_unresolved_klass()) {
1087 klassOop tk = constant_pool->klass_at(index, CHECK);
1088 return;
1089 }
1090
1091 if (!constant_pool->tag_at(index).is_symbol()) return;
1092
1093 Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
1094 symbolHandle symbol (THREAD, constant_pool->symbol_at(index));
1095
1096 // class name?
1097 if (symbol->byte_at(0) != '(') {
1098 Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
1099 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1100 return;
1101 }
1102
1103 // then it must be a signature!
1825 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
1826 bc = Bytecodes::_illegal;
1827 sprintf(name, "%s/%s/%s",
1828 trap_reason_name(reason),
1829 trap_action_name(action),
1830 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
1831 juint r = counter >> LSB_BITS;
1832 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
1833 account -= r;
1834 }
1835 }
1836 }
1837 }
1838 if (account != 0) {
1839 PRINT_STAT_LINE("unaccounted", account);
1840 }
1841 #undef PRINT_STAT_LINE
1842 if (xtty != NULL) xtty->tail("statistics");
1843 }
1844 }
1845 #else // COMPILER2 || SHARK
1846
1847
1848 // Stubs for C1 only system.
// Stub for builds without COMPILER2 or SHARK: no per-method trap state
// exists, so nothing is ever reported as recompiled.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}
1852
// Stub for builds without COMPILER2 or SHARK: trap reasons are not tracked;
// always reports the reason as "unknown".
const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}
1856
// Stub for builds without COMPILER2 or SHARK: no deoptimization statistics
// are gathered, so there is nothing to print.
void Deoptimization::print_statistics() {
  // no output
}
1860
// Stub for builds without COMPILER2 or SHARK: there is no MDO trap
// accounting, so the interpreter-triggered update is a no-op.
void
Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  // no update
}
1865
// Stub for builds without COMPILER2 or SHARK: no trap state is recorded, so
// it returns 0 unconditionally (presumably meaning "reason not present" —
// confirm against the COMPILER2 implementation).
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}
1869
// Stub for builds without COMPILER2 or SHARK: deoptimization statistics are
// not collected, so the counters are never updated.
void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}
1874
// Stub for builds without COMPILER2 or SHARK: there is no reason/action
// encoding to decode, so just print the raw trap_state value into the
// caller's buffer and return it.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}
1879 }
1880
1881 #endif // COMPILER2 || SHARK
|