/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"

#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#endif


bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
                                         int caller_adjustment,
                                         int caller_actual_parameters,
                                         int number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type,
                                         int exec_mode) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type               = return_type;
  _initial_info              = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = exec_mode;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
  assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
}


Deoptimization::UnrollBlock::~UnrollBlock() {
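  // Note: _frame_sizes and _frame_pcs are C-heap arrays allocated in
  // fetch_unroll_info_helper() and handed to this UnrollBlock, while
  // _register_block is allocated in the constructor above; all three are
  // owned by the UnrollBlock and released here.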
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}



int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   " frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print(INTX_FORMAT " ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  if (TraceDeoptimization) {
    tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
  }
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread, exec_mode);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  thread->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame. This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

  bool realloc_failures = false;

#if COMPILER2_OR_JVMCI
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
#ifndef INCLUDE_JVMCI
  if (DoEscapeAnalysis || EliminateNestedLocks) {
    if (EliminateAllocations) {
#endif // INCLUDE_JVMCI
      assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return oop
      // in compiled code. Such sites include java method calls,
      // runtime calls (for example, used to allocate new objects/arrays
      // on slow code path) and any other calls generated in compiled code.
      // It is not guaranteed that we can get such information here only
      // by analyzing bytecode in deoptimized frames. This is why this flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      // If the previous frame was popped or if we are dispatching an exception,
      // we don't have an oop result.
      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // call which returns oop we need to save it since it is not in oopmap.
        oop result = deoptee.saved_oop_result(&map);
        assert(oopDesc::is_oop_or_null(result), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
        }
      }
      if (objects != NULL) {
        JRT_BLOCK
          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
        JRT_END
        bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
        reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
          print_objects(objects, realloc_failures);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
#ifndef INCLUDE_JVMCI
    }
    if (EliminateLocks) {
#endif // INCLUDE_JVMCI
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert(cvf->scope() != NULL, "expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread, realloc_failures);
#ifndef PRODUCT
          if (PrintDeoptimizationDetails) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
                }
                if (mi->owner_is_scalar_replaced()) {
                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
                  tty->print_cr(" failed reallocation for klass %s", k->external_name());
                } else {
                  tty->print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
                }
              }
            }
          }
#endif // !PRODUCT
        }
      }
#ifndef INCLUDE_JVMCI
    }
  }
#endif // INCLUDE_JVMCI
#endif // COMPILER2_OR_JVMCI

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != NULL, "exception oop can not be null");
  }

  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  NoSafepointVerifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    pop_frames_failed_reallocs(thread, array);
  }
#endif

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }

  }

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() ||
         cb->is_uncommon_trap_stub() ||
         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
         "unexpected code blob: %s", cb->name());
#endif

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than number of arguments in the callee when method
  // handles are used. If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method = deopt_sender.interpreter_frame_method();
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller). Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // so things look a little strange in this loop.
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized then we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

#if INCLUDE_JVMCI
  if (exceptionObject() != NULL) {
    thread->set_exception_oop(exceptionObject());
    exec_mode = Unpack_exception;
  }
#endif

  if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(thread->has_pending_exception(), "should have thrown OOME");
    thread->set_exception_oop(thread->pending_exception());
    thread->clear_pending_exception();
    exec_mode = Unpack_exception;
  }

#if INCLUDE_JVMCI
  if (thread->frames_to_pop_failed_realloc() > 0) {
    thread->set_pending_monitorenter(false);
  }
#endif

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_compiled_method(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
      thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}

// Moved from cpu directories because none of the cpus has callee save values.
// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When we deoptimize a situation where a
  // compiled caller calls a compiled callee, the caller will have registers it expects
  // to survive the call to the callee. If we deoptimize the callee the only
  // way we can restore these registers is to have the oldest interpreter
  // frame that we create restore these values. That is what this routine
  // will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark; any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
                  p2i(thread), p2i(array), exec_mode);
  }
#endif
  Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
              p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;
    // Clear pending exception to not break verification code (restored afterwards)
    PRESERVE_EXCEPTION_MARK;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (Bytecodes::is_invoke(cur_code)) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        cur_invoke_parameter_size = invoke.size_of_parameters();
        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
          callee_size_of_parameters++;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code next_code = str.next();
        if (next_code >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
            // Get expression stack size for the next bytecode
            if (Bytecodes::is_invoke(next_code)) {
              Bytecode_invoke invoke(mh, str.bci());
              next_mask_expression_stack_size = invoke.size_of_parameters();
            } else {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
            }
            // Need to subtract off the size of the result type of
            // the bytecode because this is not described in the
            // debug info but returned to the interpreter in the TOS
            // caching register
            BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
            if (bytecode_result_type != T_ILLEGAL) {
              top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
            }
            assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive");
            try_next_mask = true;
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
        /* SPARC */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
        /* x86 */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
        (try_next_mask &&
         (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                top_frame_expression_stack_adjustment))) ||
        (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
        (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
         (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
        )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr(" Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr(" try_next_mask = %d", try_next_mask);
        tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr(" callee_max_locals = %d", callee_max_locals);
        tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr(" exec_mode = %d", exec_mode);
        tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
        tty->print_cr(" Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr(" %s (bci %d)",
                        el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END


int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}

Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;

#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    oop obj = NULL;

    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      obj = ik->allocate_instance(THREAD);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: { // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      bool big_value = false;
      if (i + 1 < sv->field_size() && type == T_INT) {
        if (sv->field_at(i)->is_location()) {
          Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
          if (type == Location::dbl || type == Location::lng) {
            big_value = true;
          }
        } else if (sv->field_at(i)->is_constant_int()) {
          ScopeValue* next_scope_field = sv->field_at(i + 1);
          if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
            big_value = true;
          }
        }
      }

      if (big_value) {
        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->int_at_put(index, (jint)*((jint*)&res));
        obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
      } else {
        val = value->get_int();
        obj->int_at_put(index, (jint)*((jint*)&val));
      }
      break;
    }

    case T_SHORT:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_CHAR:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->char_at_put(index, (jchar)*((jint*)&val));
      break;

    case T_BYTE:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->byte_at_put(index, (jbyte)*((jint*)&val));
      break;

    case T_BOOLEAN:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}

class ReassignedField {
public:
  int _offset;
  BasicType _type;
public:
  ReassignedField() {
    _offset = 0;
    _type = T_ILLEGAL;
  }
};

int compare(ReassignedField* left, ReassignedField* right) {
  return left->_offset - right->_offset;
}

// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
  if (klass->superklass() != NULL) {
    svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
  }

  GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
  for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
      ReassignedField field;
      field._offset = fs.offset();
      field._type = FieldType::basic_type(fs.signature());
      fields->append(field);
    }
  }
  fields->sort(compare);
  for (int i = 0; i < fields->length(); i++) {
    intptr_t val;
    ScopeValue* scope_field = sv->field_at(svIndex);
    StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
    int offset = fields->at(i)._offset;
    BasicType type = fields->at(i)._type;
    switch (type) {
      case T_OBJECT: case T_ARRAY:
        assert(value->type() == T_OBJECT, "Agreement.");
        obj->obj_field_put(offset, value->get_obj()());
        break;

      // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
      case T_INT: case T_FLOAT: { // 4 bytes.
        assert(value->type() == T_INT, "Agreement.");
        bool big_value = false;
        if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
          if (scope_field->is_location()) {
            Location::Type type = ((LocationValue*) scope_field)->location().type();
            if (type == Location::dbl || type == Location::lng) {
              big_value = true;
            }
          }
          if (scope_field->is_constant_int()) {
            ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
              big_value = true;
            }
          }
        }

        if (big_value) {
          i++;
          assert(i < fields->length(), "second T_INT field needed");
          assert(fields->at(i)._type == T_INT, "T_INT field needed");
        } else {
          val = value->get_int();
          obj->int_field_put(offset, (jint)*((jint*)&val));
          break;
        }
      }
      /* no break */

      case T_LONG: case T_DOUBLE: {
        assert(value->type() == T_INT, "Agreement.");
        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->long_field_put(offset, res);
        break;
      }

      case T_SHORT:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->short_field_put(offset, (jshort)*((jint*)&val));
        break;

      case T_CHAR:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->char_field_put(offset, (jchar)*((jint*)&val));
        break;

      case T_BYTE:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->byte_field_put(offset, (jbyte)*((jint*)&val));
        break;

      case T_BOOLEAN:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->bool_field_put(offset, (jboolean)*((jint*)&val));
        break;

      default:
        ShouldNotReachHere();
    }
    svIndex++;
  }
  return svIndex;
}

// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
    if (obj.is_null()) {
      continue;
    }

    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
      if (!mon_info->owner_is_scalar_replaced()) {
        Handle obj(thread, mon_info->owner());
        markOop mark = obj->mark();
        if (UseBiasedLocking && mark->has_bias_pattern()) {
          // Newly allocated objects may have the mark set to anonymously biased.
          // Also the deoptimized method may have called methods with synchronization
          // where the thread-local object is bias locked to the current thread.
          assert(mark->is_biased_anonymously() ||
                 mark->biased_locker() == thread, "should be locked to current thread");
          // Reset mark word to unbiased prototype.
          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
          obj->set_mark(unbiased_prototype);
        }
        BasicLock* lock = mon_info->lock();
        ObjectSynchronizer::slow_enter(obj, lock, thread);
        assert(mon_info->owner()->is_locked(), "object must be locked now");
      }
    }
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();

    tty->print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
    k->print_value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (obj.is_null()) {
      tty->print(" allocation failed");
    } else {
      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    }
    tty->cr();

    if (Verbose && !obj.is_null()) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
    fr.print_on(tty);
    tty->print_cr(" Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print(" %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr(" Created vframeArray " INTPTR_FORMAT, p2i(array));
  }
#endif // PRODUCT

  return array;
}

#if COMPILER2_OR_JVMCI
void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
  // Reallocation of some scalar replaced objects failed. Record
  // that we need to pop all the interpreter frames for the
  // deoptimized compiled frame.
  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
  thread->set_frames_to_pop_failed_realloc(array->frames());
  // Unlock all monitors here otherwise the interpreter will see a
  // mix of locked and unlocked monitors (because of failed
  // reallocations of synchronized objects) and be confused.
  for (int i = 0; i < array->frames(); i++) {
    MonitorChunk* monitors = array->element(i)->monitors();
    if (monitors != NULL) {
      for (int j = 0; j < monitors->number_of_monitors(); j++) {
        BasicObjectLock* src = monitors->at(j);
        if (src->obj() != NULL) {
          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
        }
      }
      array->element(i)->free_monitors(thread);
#ifdef ASSERT
      array->element(i)->set_removed_monitors();
#endif
    }
  }
}
#endif

static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  Thread* thread = Thread::current();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(thread, mon_info->owner()));
    }
  }
}


void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}


void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}


void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(reason, Action_none, Bytecodes::_illegal);

  if (LogCompilation && xtty != NULL) {
    CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
    assert(cm != NULL, "only compiled methods can deopt");

    ttyLocker ttyl;
    xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
    cm->log_identity(xtty);
    xtty->end_head();
    for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
      xtty->begin_elem("jvms bci='%d'", sd->bci());
      xtty->method(sd->method());
      xtty->end_elem();
      if (sd->is_top()) break;
    }
    xtty->tail("deoptimized");
  }

  // Patch the compiled method so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  deoptimize(thread, fr, map, Reason_constraint);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize the frame which is already patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr, reason);

}

#if INCLUDE_JVMCI
address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
  // there is no exception handler for this pc => deoptimize
  cm->make_not_entrant();

  // Use Deoptimization::deoptimize for all of its side-effects:
  // revoking biases of monitors, gathering traps statistics, logging...
  // it also patches the return pc but we do not care about that
  // since we return a continuation to the deopt_blob below.
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
  Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);

  MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
  if (trap_mdo != NULL) {
    trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
  }

  return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
#endif

void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "can only deoptimize other thread at a safepoint");
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map, reason);
}


void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
  if (thread == Thread::current()) {
    Deoptimization::deoptimize_frame_internal(thread, id, reason);
  } else {
    VM_DeoptimizeFrame deopt(thread, id, reason);
    VMThread::execute(&deopt);
  }
}

void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  deoptimize_frame(thread, id, Reason_constraint);
}

// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END

MethodData*
Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
                                bool create_if_missing) {
  Thread* THREAD = thread;
  MethodData* mdo = m()->method_data();
  if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
1427 Method::build_interpreter_method_data(m, THREAD); 1428 if (HAS_PENDING_EXCEPTION) { 1429 assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); 1430 CLEAR_PENDING_EXCEPTION; 1431 } 1432 mdo = m()->method_data(); 1433 } 1434 return mdo; 1435 } 1436 1437 #if COMPILER2_OR_JVMCI 1438 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) { 1439 // in case of an unresolved klass entry, load the class. 1440 if (constant_pool->tag_at(index).is_unresolved_klass()) { 1441 Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK); 1442 return; 1443 } 1444 1445 if (!constant_pool->tag_at(index).is_symbol()) return; 1446 1447 Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader()); 1448 Symbol* symbol = constant_pool->symbol_at(index); 1449 1450 // class name? 1451 if (symbol->byte_at(0) != '(') { 1452 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain()); 1453 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK); 1454 return; 1455 } 1456 1457 // then it must be a signature! 1458 ResourceMark rm(THREAD); 1459 for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) { 1460 if (ss.is_object()) { 1461 Symbol* class_name = ss.as_symbol(CHECK); 1462 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain()); 1463 SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK); 1464 } 1465 } 1466 } 1467 1468 1469 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) { 1470 EXCEPTION_MARK; 1471 load_class_by_index(constant_pool, index, THREAD); 1472 if (HAS_PENDING_EXCEPTION) { 1473 // Exception happened during classloading. We ignore the exception here, since it 1474 // is going to be rethrown since the current activation is going to be deoptimized and 1475 // the interpreter will re-execute the bytecode. 1476 CLEAR_PENDING_EXCEPTION; 1477 // Class loading called java code which may have caused a stack 1478 // overflow. If the exception was thrown right before the return 1479 // to the runtime the stack is no longer guarded. Reguard the 1480 // stack otherwise if we return to the uncommon trap blob and the 1481 // stack bang causes a stack overflow we crash. 1482 assert(THREAD->is_Java_thread(), "only a java thread can be here"); 1483 JavaThread* thread = (JavaThread*)THREAD; 1484 bool guard_pages_enabled = thread->stack_guards_enabled(); 1485 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); 1486 assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash"); 1487 } 1488 } 1489 1490 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) { 1491 HandleMark hm; 1492 1493 // uncommon_trap() is called at the beginning of the uncommon trap 1494 // handler. Note this fact before we start generating temporary frames 1495 // that can confuse an asynchronous stack walker. This counter is 1496 // decremented at the end of unpack_frames(). 1497 thread->inc_in_deopt_handler(); 1498 1499 // We need to update the map if we have biased locking. 
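// (The boolean passed to RegisterMap selects whether callee-saved register values are
// tracked while walking sender frames; JVMCI builds always request it so an exception
// can be read from the stack (see the comment below), otherwise it is only needed when
// biased locks may have to be revoked.)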
1500 #if INCLUDE_JVMCI
1501 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1502 RegisterMap reg_map(thread, true);
1503 #else
1504 RegisterMap reg_map(thread, UseBiasedLocking);
1505 #endif
1506 frame stub_frame = thread->last_frame();
1507 frame fr = stub_frame.sender(&reg_map);
1508 // Make sure the calling nmethod is not getting deoptimized and removed
1509 // before we are done with it.
1510 nmethodLocker nl(fr.pc());
1511
1512 // Log a message
1513 Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1514 trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1515
1516 {
1517 ResourceMark rm;
1518
1519 // Revoke biases of any monitors in the frame to ensure we can migrate them
1520 revoke_biases_of_monitors(thread, fr, &reg_map);
1521
1522 DeoptReason reason = trap_request_reason(trap_request);
1523 DeoptAction action = trap_request_action(trap_request);
1524 #if INCLUDE_JVMCI
1525 int debug_id = trap_request_debug_id(trap_request);
1526 #endif
1527 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1528
1529 vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
1530 compiledVFrame* cvf = compiledVFrame::cast(vf);
1531
1532 CompiledMethod* nm = cvf->code();
1533
1534 ScopeDesc* trap_scope = cvf->scope();
1535
1536 if (TraceDeoptimization) {
1537 ttyLocker ttyl;
1538 tty->print_cr(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1539 #if INCLUDE_JVMCI
1540 , debug_id
1541 #endif
1542 );
1543 }
1544
1545 methodHandle trap_method = trap_scope->method();
1546 int trap_bci = trap_scope->bci();
1547 #if INCLUDE_JVMCI
1548 oop speculation = thread->pending_failed_speculation();
1549 if (nm->is_compiled_by_jvmci()) {
1550 if (speculation != NULL) {
1551 oop speculation_log = nm->as_nmethod()->speculation_log();
1552 if (speculation_log != NULL) {
1553 if (TraceDeoptimization || TraceUncollectedSpeculations) {
1554 if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
1555 tty->print_cr("A speculation that was not collected by the compiler is being overwritten");
1556 }
1557 }
1558 if (TraceDeoptimization) {
1559 tty->print_cr("Saving speculation to speculation log");
1560 }
1561 HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation);
1562 } else {
1563 if (TraceDeoptimization) {
1564 tty->print_cr("Speculation present but no speculation log");
1565 }
1566 }
1567 thread->set_pending_failed_speculation(NULL);
1568 } else {
1569 if (TraceDeoptimization) {
1570 tty->print_cr("No speculation");
1571 }
1572 }
1573 } else {
1574 assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1575 }
1576
1577 if (trap_bci == SynchronizationEntryBCI) {
1578 trap_bci = 0;
1579 thread->set_pending_monitorenter(true);
1580 }
1581
1582 if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1583 thread->set_pending_transfer_to_interpreter(true);
1584 }
1585 #endif
1586
1587 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
1588 // Record this event in the histogram.
1589 gather_statistics(reason, action, trap_bc);
1590
1591 // Ensure that we can record deopt. history:
1592 // Need MDO to record RTM code generation state.
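// (Trap profiling, code aging, and RTM locking each want an MDO even if the interpreter
// never allocated one, hence create_if_missing below.)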
1593 bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking ); 1594 1595 methodHandle profiled_method; 1596 #if INCLUDE_JVMCI 1597 if (nm->is_compiled_by_jvmci()) { 1598 profiled_method = nm->method(); 1599 } else { 1600 profiled_method = trap_method; 1601 } 1602 #else 1603 profiled_method = trap_method; 1604 #endif 1605 1606 MethodData* trap_mdo = 1607 get_method_data(thread, profiled_method, create_if_missing); 1608 1609 // Log a message 1610 Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s", 1611 trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()), 1612 trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name()); 1613 1614 // Print a bunch of diagnostics, if requested. 1615 if (TraceDeoptimization || LogCompilation) { 1616 ResourceMark rm; 1617 ttyLocker ttyl; 1618 char buf[100]; 1619 if (xtty != NULL) { 1620 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", 1621 os::current_thread_id(), 1622 format_trap_request(buf, sizeof(buf), trap_request)); 1623 nm->log_identity(xtty); 1624 } 1625 Symbol* class_name = NULL; 1626 bool unresolved = false; 1627 if (unloaded_class_index >= 0) { 1628 constantPoolHandle constants (THREAD, trap_method->constants()); 1629 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { 1630 class_name = constants->klass_name_at(unloaded_class_index); 1631 unresolved = true; 1632 if (xtty != NULL) 1633 xtty->print(" unresolved='1'"); 1634 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { 1635 class_name = constants->symbol_at(unloaded_class_index); 1636 } 1637 if (xtty != NULL) 1638 xtty->name(class_name); 1639 } 1640 if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) { 1641 // Dump the relevant MDO state. 1642 // This is the deopt count for the current reason, any previous 1643 // reasons or recompiles seen at this point. 1644 int dcnt = trap_mdo->trap_count(reason); 1645 if (dcnt != 0) 1646 xtty->print(" count='%d'", dcnt); 1647 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); 1648 int dos = (pdata == NULL)? 0: pdata->trap_state(); 1649 if (dos != 0) { 1650 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); 1651 if (trap_state_is_recompiled(dos)) { 1652 int recnt2 = trap_mdo->overflow_recompile_count(); 1653 if (recnt2 != 0) 1654 xtty->print(" recompiles2='%d'", recnt2); 1655 } 1656 } 1657 } 1658 if (xtty != NULL) { 1659 xtty->stamp(); 1660 xtty->end_head(); 1661 } 1662 if (TraceDeoptimization) { // make noise on the tty 1663 tty->print("Uncommon trap occurred in"); 1664 nm->method()->print_short_name(tty); 1665 tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id()); 1666 #if INCLUDE_JVMCI 1667 if (nm->is_nmethod()) { 1668 char* installed_code_name = nm->as_nmethod()->jvmci_installed_code_name(buf, sizeof(buf)); 1669 if (installed_code_name != NULL) { 1670 tty->print(" (JVMCI: installed code name=%s) ", installed_code_name); 1671 } 1672 } 1673 #endif 1674 tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), 1675 p2i(fr.pc()), 1676 os::current_thread_id(), 1677 trap_reason_name(reason), 1678 trap_action_name(action), 1679 unloaded_class_index 1680 #if INCLUDE_JVMCI 1681 , debug_id 1682 #endif 1683 ); 1684 if (class_name != NULL) { 1685 tty->print(unresolved ? 
" unresolved class: " : " symbol: "); 1686 class_name->print_symbol_on(tty); 1687 } 1688 tty->cr(); 1689 } 1690 if (xtty != NULL) { 1691 // Log the precise location of the trap. 1692 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 1693 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1694 xtty->method(sd->method()); 1695 xtty->end_elem(); 1696 if (sd->is_top()) break; 1697 } 1698 xtty->tail("uncommon_trap"); 1699 } 1700 } 1701 // (End diagnostic printout.) 1702 1703 // Load class if necessary 1704 if (unloaded_class_index >= 0) { 1705 constantPoolHandle constants(THREAD, trap_method->constants()); 1706 load_class_by_index(constants, unloaded_class_index); 1707 } 1708 1709 // Flush the nmethod if necessary and desirable. 1710 // 1711 // We need to avoid situations where we are re-flushing the nmethod 1712 // because of a hot deoptimization site. Repeated flushes at the same 1713 // point need to be detected by the compiler and avoided. If the compiler 1714 // cannot avoid them (or has a bug and "refuses" to avoid them), this 1715 // module must take measures to avoid an infinite cycle of recompilation 1716 // and deoptimization. There are several such measures: 1717 // 1718 // 1. If a recompilation is ordered a second time at some site X 1719 // and for the same reason R, the action is adjusted to 'reinterpret', 1720 // to give the interpreter time to exercise the method more thoroughly. 1721 // If this happens, the method's overflow_recompile_count is incremented. 1722 // 1723 // 2. If the compiler fails to reduce the deoptimization rate, then 1724 // the method's overflow_recompile_count will begin to exceed the set 1725 // limit PerBytecodeRecompilationCutoff. If this happens, the action 1726 // is adjusted to 'make_not_compilable', and the method is abandoned 1727 // to the interpreter. This is a performance hit for hot methods, 1728 // but is better than a disastrous infinite cycle of recompilations. 1729 // (Actually, only the method containing the site X is abandoned.) 1730 // 1731 // 3. In parallel with the previous measures, if the total number of 1732 // recompilations of a method exceeds the much larger set limit 1733 // PerMethodRecompilationCutoff, the method is abandoned. 1734 // This should only happen if the method is very large and has 1735 // many "lukewarm" deoptimizations. The code which enforces this 1736 // limit is elsewhere (class nmethod, class Method). 1737 // 1738 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance 1739 // to recompile at each bytecode independently of the per-BCI cutoff. 1740 // 1741 // The decision to update code is up to the compiler, and is encoded 1742 // in the Action_xxx code. If the compiler requests Action_none 1743 // no trap state is changed, no compiled code is changed, and the 1744 // computation suffers along in the interpreter. 1745 // 1746 // The other action codes specify various tactics for decompilation 1747 // and recompilation. Action_maybe_recompile is the loosest, and 1748 // allows the compiled code to stay around until enough traps are seen, 1749 // and until the compiler gets around to recompiling the trapping method. 1750 // 1751 // The other actions cause immediate removal of the present code. 1752 1753 // Traps caused by injected profile shouldn't pollute trap counts. 
1754 bool injected_profile_trap = trap_method->has_injected_profile() &&
1755 (reason == Reason_intrinsic || reason == Reason_unreached);
1756
1757 bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1758 bool make_not_entrant = false;
1759 bool make_not_compilable = false;
1760 bool reprofile = false;
1761 switch (action) {
1762 case Action_none:
1763 // Keep the old code.
1764 update_trap_state = false;
1765 break;
1766 case Action_maybe_recompile:
1767 // Do not need to invalidate the present code, but we can
1768 // initiate another compilation.
1769 // Start the compiler without (necessarily) invalidating the nmethod.
1770 // The system will tolerate the old code, but new code should be
1771 // generated when possible.
1772 break;
1773 case Action_reinterpret:
1774 // Go back into the interpreter for a while, and then consider
1775 // recompiling from scratch.
1776 make_not_entrant = true;
1777 // Reset invocation counter for outermost method.
1778 // This will allow the interpreter to exercise the bytecodes
1779 // for a while before recompiling.
1780 // By contrast, Action_make_not_entrant is immediate.
1781 //
1782 // Note that the compiler will track null_check, null_assert,
1783 // range_check, and class_check events and log them as if they
1784 // had been traps taken from compiled code. This will update
1785 // the MDO trap history so that the next compilation will
1786 // properly detect hot trap sites.
1787 reprofile = true;
1788 break;
1789 case Action_make_not_entrant:
1790 // Request immediate recompilation, and get rid of the old code.
1791 // Make them not entrant, so next time they are called they get
1792 // recompiled. Unloaded classes are loaded now so recompile before next
1793 // time they are called. Same for uninitialized. The interpreter will
1794 // link the missing class, if any.
1795 make_not_entrant = true;
1796 break;
1797 case Action_make_not_compilable:
1798 // Give up on compiling this method at all.
1799 make_not_entrant = true;
1800 make_not_compilable = true;
1801 break;
1802 default:
1803 ShouldNotReachHere();
1804 }
1805
1806 // Setting +ProfileTraps fixes the following, on all platforms:
1807 // 4852688: ProfileInterpreter is off by default for ia64. The result is
1808 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1809 // recompile relies on a MethodData* to record heroic opt failures.
1810
1811 // Whether the interpreter is producing MDO data or not, we also need
1812 // to use the MDO to detect hot deoptimization points and control
1813 // aggressive optimization.
1814 bool inc_recompile_count = false;
1815 ProfileData* pdata = NULL;
1816 if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
1817 assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
1818 uint this_trap_count = 0;
1819 bool maybe_prior_trap = false;
1820 bool maybe_prior_recompile = false;
1821 pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
1822 #if INCLUDE_JVMCI
1823 nm->is_compiled_by_jvmci() && nm->is_osr_method(),
1824 #endif
1825 nm->method(),
1826 //outputs:
1827 this_trap_count,
1828 maybe_prior_trap,
1829 maybe_prior_recompile);
1830 // Because the interpreter also counts null, div0, range, and class
1831 // checks, these traps from compiled code are double-counted.
1832 // This is harmless; it just means that the PerXTrapLimit values
1833 // are in effect a little smaller than they look.
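// From the per-BCI and per-method history gathered above, the code below decides whether
// to throw away the current nmethod (make_not_entrant), whether to count another
// recompile at this BCI (inc_recompile_count), and whether to let the interpreter
// reprofile the method before the next compile (reprofile).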
1834 1835 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1836 if (per_bc_reason != Reason_none) { 1837 // Now take action based on the partially known per-BCI history. 1838 if (maybe_prior_trap 1839 && this_trap_count >= (uint)PerBytecodeTrapLimit) { 1840 // If there are too many traps at this BCI, force a recompile. 1841 // This will allow the compiler to see the limit overflow, and 1842 // take corrective action, if possible. The compiler generally 1843 // does not use the exact PerBytecodeTrapLimit value, but instead 1844 // changes its tactics if it sees any traps at all. This provides 1845 // a little hysteresis, delaying a recompile until a trap happens 1846 // several times. 1847 // 1848 // Actually, since there is only one bit of counter per BCI, 1849 // the possible per-BCI counts are {0,1,(per-method count)}. 1850 // This produces accurate results if in fact there is only 1851 // one hot trap site, but begins to get fuzzy if there are 1852 // many sites. For example, if there are ten sites each 1853 // trapping two or more times, they each get the blame for 1854 // all of their traps. 1855 make_not_entrant = true; 1856 } 1857 1858 // Detect repeated recompilation at the same BCI, and enforce a limit. 1859 if (make_not_entrant && maybe_prior_recompile) { 1860 // More than one recompile at this point. 1861 inc_recompile_count = maybe_prior_trap; 1862 } 1863 } else { 1864 // For reasons which are not recorded per-bytecode, we simply 1865 // force recompiles unconditionally. 1866 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) 1867 make_not_entrant = true; 1868 } 1869 1870 // Go back to the compiler if there are too many traps in this method. 1871 if (this_trap_count >= per_method_trap_limit(reason)) { 1872 // If there are too many traps in this method, force a recompile. 1873 // This will allow the compiler to see the limit overflow, and 1874 // take corrective action, if possible. 1875 // (This condition is an unlikely backstop only, because the 1876 // PerBytecodeTrapLimit is more likely to take effect first, 1877 // if it is applicable.) 1878 make_not_entrant = true; 1879 } 1880 1881 // Here's more hysteresis: If there has been a recompile at 1882 // this trap point already, run the method in the interpreter 1883 // for a while to exercise it more thoroughly. 1884 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { 1885 reprofile = true; 1886 } 1887 } 1888 1889 // Take requested actions on the method: 1890 1891 // Recompile 1892 if (make_not_entrant) { 1893 if (!nm->make_not_entrant()) { 1894 return; // the call did not change nmethod's state 1895 } 1896 1897 if (pdata != NULL) { 1898 // Record the recompilation event, if any. 1899 int tstate0 = pdata->trap_state(); 1900 int tstate1 = trap_state_set_recompiled(tstate0, true); 1901 if (tstate1 != tstate0) 1902 pdata->set_trap_state(tstate1); 1903 } 1904 1905 #if INCLUDE_RTM_OPT 1906 // Restart collecting RTM locking abort statistic if the method 1907 // is recompiled for a reason other than RTM state change. 1908 // Assume that in new recompiled code the statistic could be different, 1909 // for example, due to different inlining. 
1910 if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && 1911 UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { 1912 trap_mdo->atomic_set_rtm_state(ProfileRTM); 1913 } 1914 #endif 1915 // For code aging we count traps separately here, using make_not_entrant() 1916 // as a guard against simultaneous deopts in multiple threads. 1917 if (reason == Reason_tenured && trap_mdo != NULL) { 1918 trap_mdo->inc_tenure_traps(); 1919 } 1920 } 1921 1922 if (inc_recompile_count) { 1923 trap_mdo->inc_overflow_recompile_count(); 1924 if ((uint)trap_mdo->overflow_recompile_count() > 1925 (uint)PerBytecodeRecompilationCutoff) { 1926 // Give up on the method containing the bad BCI. 1927 if (trap_method() == nm->method()) { 1928 make_not_compilable = true; 1929 } else { 1930 trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff"); 1931 // But give grace to the enclosing nm->method(). 1932 } 1933 } 1934 } 1935 1936 // Reprofile 1937 if (reprofile) { 1938 CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method()); 1939 } 1940 1941 // Give up compiling 1942 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) { 1943 assert(make_not_entrant, "consistent"); 1944 nm->method()->set_not_compilable(CompLevel_full_optimization); 1945 } 1946 1947 } // Free marked resources 1948 1949 } 1950 JRT_END 1951 1952 ProfileData* 1953 Deoptimization::query_update_method_data(MethodData* trap_mdo, 1954 int trap_bci, 1955 Deoptimization::DeoptReason reason, 1956 bool update_total_trap_count, 1957 #if INCLUDE_JVMCI 1958 bool is_osr, 1959 #endif 1960 Method* compiled_method, 1961 //outputs: 1962 uint& ret_this_trap_count, 1963 bool& ret_maybe_prior_trap, 1964 bool& ret_maybe_prior_recompile) { 1965 bool maybe_prior_trap = false; 1966 bool maybe_prior_recompile = false; 1967 uint this_trap_count = 0; 1968 if (update_total_trap_count) { 1969 uint idx = reason; 1970 #if INCLUDE_JVMCI 1971 if (is_osr) { 1972 idx += Reason_LIMIT; 1973 } 1974 #endif 1975 uint prior_trap_count = trap_mdo->trap_count(idx); 1976 this_trap_count = trap_mdo->inc_trap_count(idx); 1977 1978 // If the runtime cannot find a place to store trap history, 1979 // it is estimated based on the general condition of the method. 1980 // If the method has ever been recompiled, or has ever incurred 1981 // a trap with the present reason , then this BCI is assumed 1982 // (pessimistically) to be the culprit. 1983 maybe_prior_trap = (prior_trap_count != 0); 1984 maybe_prior_recompile = (trap_mdo->decompile_count() != 0); 1985 } 1986 ProfileData* pdata = NULL; 1987 1988 1989 // For reasons which are recorded per bytecode, we check per-BCI data. 1990 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1991 assert(per_bc_reason != Reason_none || update_total_trap_count, "must be"); 1992 if (per_bc_reason != Reason_none) { 1993 // Find the profile data for this BCI. If there isn't one, 1994 // try to allocate one from the MDO's set of spares. 1995 // This will let us detect a repeated trap at this point. 1996 pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? 
compiled_method : NULL); 1997 1998 if (pdata != NULL) { 1999 if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) { 2000 if (LogCompilation && xtty != NULL) { 2001 ttyLocker ttyl; 2002 // no more room for speculative traps in this MDO 2003 xtty->elem("speculative_traps_oom"); 2004 } 2005 } 2006 // Query the trap state of this profile datum. 2007 int tstate0 = pdata->trap_state(); 2008 if (!trap_state_has_reason(tstate0, per_bc_reason)) 2009 maybe_prior_trap = false; 2010 if (!trap_state_is_recompiled(tstate0)) 2011 maybe_prior_recompile = false; 2012 2013 // Update the trap state of this profile datum. 2014 int tstate1 = tstate0; 2015 // Record the reason. 2016 tstate1 = trap_state_add_reason(tstate1, per_bc_reason); 2017 // Store the updated state on the MDO, for next time. 2018 if (tstate1 != tstate0) 2019 pdata->set_trap_state(tstate1); 2020 } else { 2021 if (LogCompilation && xtty != NULL) { 2022 ttyLocker ttyl; 2023 // Missing MDP? Leave a small complaint in the log. 2024 xtty->elem("missing_mdp bci='%d'", trap_bci); 2025 } 2026 } 2027 } 2028 2029 // Return results: 2030 ret_this_trap_count = this_trap_count; 2031 ret_maybe_prior_trap = maybe_prior_trap; 2032 ret_maybe_prior_recompile = maybe_prior_recompile; 2033 return pdata; 2034 } 2035 2036 void 2037 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) { 2038 ResourceMark rm; 2039 // Ignored outputs: 2040 uint ignore_this_trap_count; 2041 bool ignore_maybe_prior_trap; 2042 bool ignore_maybe_prior_recompile; 2043 assert(!reason_is_speculate(reason), "reason speculate only used by compiler"); 2044 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts 2045 bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true); 2046 query_update_method_data(trap_mdo, trap_bci, 2047 (DeoptReason)reason, 2048 update_total_counts, 2049 #if INCLUDE_JVMCI 2050 false, 2051 #endif 2052 NULL, 2053 ignore_this_trap_count, 2054 ignore_maybe_prior_trap, 2055 ignore_maybe_prior_recompile); 2056 } 2057 2058 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) { 2059 if (TraceDeoptimization) { 2060 tty->print("Uncommon trap "); 2061 } 2062 // Still in Java no safepoints 2063 { 2064 // This enters VM and may safepoint 2065 uncommon_trap_inner(thread, trap_request); 2066 } 2067 return fetch_unroll_info_helper(thread, exec_mode); 2068 } 2069 2070 // Local derived constants. 2071 // Further breakdown of DataLayout::trap_state, as promised by DataLayout. 2072 const int DS_REASON_MASK = DataLayout::trap_mask >> 1; 2073 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK; 2074 2075 //---------------------------trap_state_reason--------------------------------- 2076 Deoptimization::DeoptReason 2077 Deoptimization::trap_state_reason(int trap_state) { 2078 // This assert provides the link between the width of DataLayout::trap_bits 2079 // and the encoding of "recorded" reasons. It ensures there are enough 2080 // bits to store all needed reasons in the per-BCI MDO profile. 
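// Roughly: the low bits covered by DS_REASON_MASK hold a single recorded DeoptReason
// (with the value DS_REASON_MASK itself standing for Reason_many), and DS_RECOMPILE_BIT
// remembers that a recompile has already been requested at this BCI.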
2081 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2082 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2083 trap_state -= recompile_bit; 2084 if (trap_state == DS_REASON_MASK) { 2085 return Reason_many; 2086 } else { 2087 assert((int)Reason_none == 0, "state=0 => Reason_none"); 2088 return (DeoptReason)trap_state; 2089 } 2090 } 2091 //-------------------------trap_state_has_reason------------------------------- 2092 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2093 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); 2094 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2095 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2096 trap_state -= recompile_bit; 2097 if (trap_state == DS_REASON_MASK) { 2098 return -1; // true, unspecifically (bottom of state lattice) 2099 } else if (trap_state == reason) { 2100 return 1; // true, definitely 2101 } else if (trap_state == 0) { 2102 return 0; // false, definitely (top of state lattice) 2103 } else { 2104 return 0; // false, definitely 2105 } 2106 } 2107 //-------------------------trap_state_add_reason------------------------------- 2108 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { 2109 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); 2110 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2111 trap_state -= recompile_bit; 2112 if (trap_state == DS_REASON_MASK) { 2113 return trap_state + recompile_bit; // already at state lattice bottom 2114 } else if (trap_state == reason) { 2115 return trap_state + recompile_bit; // the condition is already true 2116 } else if (trap_state == 0) { 2117 return reason + recompile_bit; // no condition has yet been true 2118 } else { 2119 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom 2120 } 2121 } 2122 //-----------------------trap_state_is_recompiled------------------------------ 2123 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2124 return (trap_state & DS_RECOMPILE_BIT) != 0; 2125 } 2126 //-----------------------trap_state_set_recompiled----------------------------- 2127 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { 2128 if (z) return trap_state | DS_RECOMPILE_BIT; 2129 else return trap_state & ~DS_RECOMPILE_BIT; 2130 } 2131 //---------------------------format_trap_state--------------------------------- 2132 // This is used for debugging and diagnostics, including LogFile output. 2133 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2134 int trap_state) { 2135 assert(buflen > 0, "sanity"); 2136 DeoptReason reason = trap_state_reason(trap_state); 2137 bool recomp_flag = trap_state_is_recompiled(trap_state); 2138 // Re-encode the state from its decoded components. 2139 int decoded_state = 0; 2140 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many) 2141 decoded_state = trap_state_add_reason(decoded_state, reason); 2142 if (recomp_flag) 2143 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag); 2144 // If the state re-encodes properly, format it symbolically. 2145 // Because this routine is used for debugging and diagnostics, 2146 // be robust even if the state is a strange value. 2147 size_t len; 2148 if (decoded_state != trap_state) { 2149 // Random buggy state that doesn't decode?? 
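// Fall back to the raw numeric form, e.g. "#19"; states that re-encode cleanly are
// printed symbolically below, e.g. "class_check" or "class_check recompiled".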
2150 len = jio_snprintf(buf, buflen, "#%d", trap_state); 2151 } else { 2152 len = jio_snprintf(buf, buflen, "%s%s", 2153 trap_reason_name(reason), 2154 recomp_flag ? " recompiled" : ""); 2155 } 2156 return buf; 2157 } 2158 2159 2160 //--------------------------------statics-------------------------------------- 2161 const char* Deoptimization::_trap_reason_name[] = { 2162 // Note: Keep this in sync. with enum DeoptReason. 2163 "none", 2164 "null_check", 2165 "null_assert" JVMCI_ONLY("_or_unreached0"), 2166 "range_check", 2167 "class_check", 2168 "array_check", 2169 "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"), 2170 "bimorphic" JVMCI_ONLY("_or_optimized_type_check"), 2171 "unloaded", 2172 "uninitialized", 2173 "unreached", 2174 "unhandled", 2175 "constraint", 2176 "div0_check", 2177 "age", 2178 "predicate", 2179 "loop_limit_check", 2180 "speculate_class_check", 2181 "speculate_null_check", 2182 "speculate_null_assert", 2183 "rtm_state_change", 2184 "unstable_if", 2185 "unstable_fused_if", 2186 #if INCLUDE_JVMCI 2187 "aliasing", 2188 "transfer_to_interpreter", 2189 "not_compiled_exception_handler", 2190 "unresolved", 2191 "jsr_mismatch", 2192 #endif 2193 "tenured" 2194 }; 2195 const char* Deoptimization::_trap_action_name[] = { 2196 // Note: Keep this in sync. with enum DeoptAction. 2197 "none", 2198 "maybe_recompile", 2199 "reinterpret", 2200 "make_not_entrant", 2201 "make_not_compilable" 2202 }; 2203 2204 const char* Deoptimization::trap_reason_name(int reason) { 2205 // Check that every reason has a name 2206 STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT); 2207 2208 if (reason == Reason_many) return "many"; 2209 if ((uint)reason < Reason_LIMIT) 2210 return _trap_reason_name[reason]; 2211 static char buf[20]; 2212 sprintf(buf, "reason%d", reason); 2213 return buf; 2214 } 2215 const char* Deoptimization::trap_action_name(int action) { 2216 // Check that every action has a name 2217 STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT); 2218 2219 if ((uint)action < Action_LIMIT) 2220 return _trap_action_name[action]; 2221 static char buf[20]; 2222 sprintf(buf, "action%d", action); 2223 return buf; 2224 } 2225 2226 // This is used for debugging and diagnostics, including LogFile output. 
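// A packed trap_request decodes into something like "reason='unstable_if'
// action='reinterpret'", plus "index='<cp index>'" when an unloaded class is involved
// (and "debug_id='<n>'" on JVMCI builds), mirroring the jio_snprintf formats below.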
2227 const char* Deoptimization::format_trap_request(char* buf, size_t buflen, 2228 int trap_request) { 2229 jint unloaded_class_index = trap_request_index(trap_request); 2230 const char* reason = trap_reason_name(trap_request_reason(trap_request)); 2231 const char* action = trap_action_name(trap_request_action(trap_request)); 2232 #if INCLUDE_JVMCI 2233 int debug_id = trap_request_debug_id(trap_request); 2234 #endif 2235 size_t len; 2236 if (unloaded_class_index < 0) { 2237 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"), 2238 reason, action 2239 #if INCLUDE_JVMCI 2240 ,debug_id 2241 #endif 2242 ); 2243 } else { 2244 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"), 2245 reason, action, unloaded_class_index 2246 #if INCLUDE_JVMCI 2247 ,debug_id 2248 #endif 2249 ); 2250 } 2251 return buf; 2252 } 2253 2254 juint Deoptimization::_deoptimization_hist 2255 [Deoptimization::Reason_LIMIT] 2256 [1 + Deoptimization::Action_LIMIT] 2257 [Deoptimization::BC_CASE_LIMIT] 2258 = {0}; 2259 2260 enum { 2261 LSB_BITS = 8, 2262 LSB_MASK = right_n_bits(LSB_BITS) 2263 }; 2264 2265 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, 2266 Bytecodes::Code bc) { 2267 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2268 assert(action >= 0 && action < Action_LIMIT, "oob"); 2269 _deoptimization_hist[Reason_none][0][0] += 1; // total 2270 _deoptimization_hist[reason][0][0] += 1; // per-reason total 2271 juint* cases = _deoptimization_hist[reason][1+action]; 2272 juint* bc_counter_addr = NULL; 2273 juint bc_counter = 0; 2274 // Look for an unused counter, or an exact match to this BC. 2275 if (bc != Bytecodes::_illegal) { 2276 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { 2277 juint* counter_addr = &cases[bc_case]; 2278 juint counter = *counter_addr; 2279 if ((counter == 0 && bc_counter_addr == NULL) 2280 || (Bytecodes::Code)(counter & LSB_MASK) == bc) { 2281 // this counter is either free or is already devoted to this BC 2282 bc_counter_addr = counter_addr; 2283 bc_counter = counter | bc; 2284 } 2285 } 2286 } 2287 if (bc_counter_addr == NULL) { 2288 // Overflow, or no given bytecode. 2289 bc_counter_addr = &cases[BC_CASE_LIMIT-1]; 2290 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB 2291 } 2292 *bc_counter_addr = bc_counter + (1 << LSB_BITS); 2293 } 2294 2295 jint Deoptimization::total_deoptimization_count() { 2296 return _deoptimization_hist[Reason_none][0][0]; 2297 } 2298 2299 jint Deoptimization::deoptimization_count(DeoptReason reason) { 2300 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2301 return _deoptimization_hist[reason][0][0]; 2302 } 2303 2304 void Deoptimization::print_statistics() { 2305 juint total = total_deoptimization_count(); 2306 juint account = total; 2307 if (total != 0) { 2308 ttyLocker ttyl; 2309 if (xtty != NULL) xtty->head("statistics type='deoptimization'"); 2310 tty->print_cr("Deoptimization traps recorded:"); 2311 #define PRINT_STAT_LINE(name, r) \ 2312 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); 2313 PRINT_STAT_LINE("total", total); 2314 // For each non-zero entry in the histogram, print the reason, 2315 // the action, and (if specifically known) the type of bytecode. 
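// (Each histogram cell packs the bytecode into its low LSB_BITS and the event count into
// the remaining upper bits, as done in gather_statistics() above, so the count printed
// here is counter >> LSB_BITS.)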
2316 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2317 for (int action = 0; action < Action_LIMIT; action++) {
2318 juint* cases = _deoptimization_hist[reason][1+action];
2319 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2320 juint counter = cases[bc_case];
2321 if (counter != 0) {
2322 char name[1*K];
2323 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2324 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2325 bc = Bytecodes::_illegal;
2326 sprintf(name, "%s/%s/%s",
2327 trap_reason_name(reason),
2328 trap_action_name(action),
2329 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2330 juint r = counter >> LSB_BITS;
2331 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2332 account -= r;
2333 }
2334 }
2335 }
2336 }
2337 if (account != 0) {
2338 PRINT_STAT_LINE("unaccounted", account);
2339 }
2340 #undef PRINT_STAT_LINE
2341 if (xtty != NULL) xtty->tail("statistics");
2342 }
2343 }
2344 #else // COMPILER2_OR_JVMCI
2345
2346
2347 // Stubs for C1-only system.
2348 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2349 return false;
2350 }
2351
2352 const char* Deoptimization::trap_reason_name(int reason) {
2353 return "unknown";
2354 }
2355
2356 void Deoptimization::print_statistics() {
2357 // no output
2358 }
2359
2360 void
2361 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2362 // no update
2363 }
2364
2365 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2366 return 0;
2367 }
2368
2369 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2370 Bytecodes::Code bc) {
2371 // no update
2372 }
2373
2374 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2375 int trap_state) {
2376 jio_snprintf(buf, buflen, "#%d", trap_state);
2377 return buf;
2378 }
2379
2380 #endif // COMPILER2_OR_JVMCI