/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#endif


bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  caller_actual_parameters,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type,
                                         int exec_mode) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type               = return_type;
  _initial_info              = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = exec_mode;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
  assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
}
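
// Note: _register_block reserves two intptr_t slots per register
// (RegisterMap::reg_count * 2), and value_addr_at() below indexes it with a
// stride of 2, presumably so each register has room for a two-word
// (long/double) value during unpacking.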

Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}



int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print(INTX_FORMAT " ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  if (TraceDeoptimization) {
    tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
  }
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread, exec_mode);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  thread->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame. This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));
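
  // chunk is now ordered innermost-first: chunk->at(0) is the youngest inlined
  // scope, the one in which the trap occurred; it is used as trap_scope below.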

  bool realloc_failures = false;

#if COMPILER2_OR_JVMCI
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
#if !INCLUDE_JVMCI
  if (DoEscapeAnalysis || EliminateNestedLocks) {
    if (EliminateAllocations) {
#endif // !INCLUDE_JVMCI
      assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return oop
      // in compiled code. Such sites include java method calls,
      // runtime calls (for example, used to allocate new objects/arrays
      // on slow code path) and any other calls generated in compiled code.
      // It is not guaranteed that we can get such information here only
      // by analyzing bytecode in deoptimized frames. This is why this flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      // If the previous frame was popped or if we are dispatching an exception,
      // we don't have an oop result.
      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // call which returns oop we need to save it since it is not in oopmap.
        oop result = deoptee.saved_oop_result(&map);
        assert(oopDesc::is_oop_or_null(result), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
        }
      }
      if (objects != NULL) {
        JRT_BLOCK
          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
        JRT_END
        bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
        reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
          print_objects(objects, realloc_failures);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
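
      // Next, re-acquire any monitors the compiler eliminated by lock elision.
      // The interpreter frames we are about to build expect these objects to be
      // genuinely locked; owners that were scalar replaced and failed
      // reallocation are skipped (see relock_objects).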
#if !INCLUDE_JVMCI
    }
    if (EliminateLocks) {
#endif // !INCLUDE_JVMCI
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert(cvf->scope() != NULL, "expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread, realloc_failures);
#ifndef PRODUCT
          if (PrintDeoptimizationDetails) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
                }
                if (mi->owner_is_scalar_replaced()) {
                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
                } else {
                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
                }
              }
            }
          }
#endif // !PRODUCT
        }
      }
#if !INCLUDE_JVMCI
    }
  }
#endif // !INCLUDE_JVMCI
#endif // COMPILER2_OR_JVMCI

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != NULL, "exception oop can not be null");
  }

  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  NoSafepointVerifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    pop_frames_failed_reallocs(thread, array);
  }
#endif

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }

  }
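
  // Roughly, everything from here on gathers what the deopt blob needs in
  // order to build the skeletal interpreter frames: the size and return pc of
  // every virtual frame, plus the adjustment the caller frame may need. All of
  // it is packaged into the UnrollBlock returned at the end.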

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() ||
         cb->is_uncommon_trap_stub() ||
         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
         "unexpected code blob: %s", cb->name());
#endif

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than number of arguments in the callee when method
  // handles are used. If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method = deopt_sender.interpreter_frame_method();
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller). Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // So things look a little strange in this loop.
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }
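
  // The return type matters because the deopt blob must preserve any live
  // return value while the skeletal frames are built: floats/doubles come back
  // in FP registers, and (see unpack_frames below) a pending exception makes
  // us treat the result as an oop so the exception_oop is not clobbered.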

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

#if INCLUDE_JVMCI
  if (exceptionObject() != NULL) {
    thread->set_exception_oop(exceptionObject());
    exec_mode = Unpack_exception;
  }
#endif

  if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(thread->has_pending_exception(), "should have thrown OOME");
    thread->set_exception_oop(thread->pending_exception());
    thread->clear_pending_exception();
    exec_mode = Unpack_exception;
  }

#if INCLUDE_JVMCI
  if (thread->frames_to_pop_failed_realloc() > 0) {
    thread->set_pending_monitorenter(false);
  }
#endif

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}
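
// At this point control is back in the deopt/uncommon-trap blob, which pops
// the compiled frame, pushes skeletal interpreter frames as described by the
// UnrollBlock, and then calls unpack_frames() below to fill them in.
// cleanup_deopt_info() releases the bookkeeping once unpacking is done.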

// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_compiled_method(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
        thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}

// Moved from cpu directories because none of the cpus has callee save values.
// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When a compiled caller calls a compiled
  // callee, the caller will have registers it expects to survive the call to
  // the callee. If we deoptimize the callee, the only way we can restore these
  // registers is to have the oldest interpreter frame that we create restore
  // these values. That is what this routine will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark, so any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
                  p2i(thread), p2i(array), exec_mode);
  }
#endif
  Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
              p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
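
  // Roughly, unpack_to_stack walks the vframeArray elements and fills each
  // skeletal interpreter frame that the deopt blob pushed with the saved
  // locals, expression stack and monitors of the corresponding virtual frame.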

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual   ||
          cur_code == Bytecodes::_invokespecial   ||
          cur_code == Bytecodes::_invokestatic    ||
          cur_code == Bytecodes::_invokeinterface ||
          cur_code == Bytecodes::_invokedynamic) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        Symbol* signature = invoke.signature();
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (invoke.has_receiver()) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
          callee_size_of_parameters++;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_invokedynamic:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END


int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}

Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;
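
// The next few helpers undo the compiler's escape-analysis optimizations when
// a frame is deoptimized: realloc_objects() re-creates scalar-replaced objects
// on the heap, the reassign_* functions copy their contents out of the debug
// info, and relock_objects() re-acquires eliminated monitors.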

#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    oop obj = NULL;

    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      obj = ik->allocate_instance(THREAD);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}
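
// Note on the 64-bit cases in the reassignment code below: the debug info
// describes a long/double as a pair of 32-bit stack values, so two fields are
// consumed per wide value and the halves are recombined with jlong_from();
// on SPARC the high and low words arrive swapped relative to other platforms.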

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
      case T_LONG: case T_DOUBLE: {
        assert(value->type() == T_INT, "Agreement.");
        StackValue* low =
          StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->long_at_put(index, res);
        break;
      }

      // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
      case T_INT: case T_FLOAT: { // 4 bytes.
        assert(value->type() == T_INT, "Agreement.");
        bool big_value = false;
        if (i + 1 < sv->field_size() && type == T_INT) {
          if (sv->field_at(i)->is_location()) {
            Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
            if (type == Location::dbl || type == Location::lng) {
              big_value = true;
            }
          } else if (sv->field_at(i)->is_constant_int()) {
            ScopeValue* next_scope_field = sv->field_at(i + 1);
            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
              big_value = true;
            }
          }
        }

        if (big_value) {
          StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
          jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
          // For SPARC we have to swap high and low words.
          jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
          jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
          obj->int_at_put(index, (jint)*((jint*)&res));
          obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
        } else {
          val = value->get_int();
          obj->int_at_put(index, (jint)*((jint*)&val));
        }
        break;
      }

      case T_SHORT:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->short_at_put(index, (jshort)*((jint*)&val));
        break;

      case T_CHAR:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->char_at_put(index, (jchar)*((jint*)&val));
        break;

      case T_BYTE:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->byte_at_put(index, (jbyte)*((jint*)&val));
        break;

      case T_BOOLEAN:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->bool_at_put(index, (jboolean)*((jint*)&val));
        break;

      default:
        ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}

class ReassignedField {
public:
  int _offset;
  BasicType _type;
public:
  ReassignedField() {
    _offset = 0;
    _type = T_ILLEGAL;
  }
};

int compare(ReassignedField* left, ReassignedField* right) {
  return left->_offset - right->_offset;
}
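
// reassign_fields_by_klass below visits superclass fields first and sorts each
// klass's own fields by offset; this is meant to match the scope-value order
// the compiler recorded (the getInstanceFields(true) order mentioned below),
// so svIndex advances in lockstep with the sorted field list.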

// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
  if (klass->superklass() != NULL) {
    svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
  }

  GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
  for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
      ReassignedField field;
      field._offset = fs.offset();
      field._type = FieldType::basic_type(fs.signature());
      fields->append(field);
    }
  }
  fields->sort(compare);
  for (int i = 0; i < fields->length(); i++) {
    intptr_t val;
    ScopeValue* scope_field = sv->field_at(svIndex);
    StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
    int offset = fields->at(i)._offset;
    BasicType type = fields->at(i)._type;
    switch (type) {
      case T_OBJECT: case T_ARRAY:
        assert(value->type() == T_OBJECT, "Agreement.");
        obj->obj_field_put(offset, value->get_obj()());
        break;

      // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
      case T_INT: case T_FLOAT: { // 4 bytes.
        assert(value->type() == T_INT, "Agreement.");
        bool big_value = false;
        if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
          if (scope_field->is_location()) {
            Location::Type type = ((LocationValue*) scope_field)->location().type();
            if (type == Location::dbl || type == Location::lng) {
              big_value = true;
            }
          }
          if (scope_field->is_constant_int()) {
            ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
              big_value = true;
            }
          }
        }

        if (big_value) {
          i++;
          assert(i < fields->length(), "second T_INT field needed");
          assert(fields->at(i)._type == T_INT, "T_INT field needed");
        } else {
          val = value->get_int();
          obj->int_field_put(offset, (jint)*((jint*)&val));
          break;
        }
      }
      /* no break */
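
      // Deliberate fall-through from T_INT/T_FLOAT when big_value is set: the
      // pair of 32-bit fields actually holds one long/double, and the wide
      // case below consumes the second half via sv->field_at(++svIndex).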

      case T_LONG: case T_DOUBLE: {
        assert(value->type() == T_INT, "Agreement.");
        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->long_field_put(offset, res);
        break;
      }

      case T_SHORT:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->short_field_put(offset, (jshort)*((jint*)&val));
        break;

      case T_CHAR:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->char_field_put(offset, (jchar)*((jint*)&val));
        break;

      case T_BYTE:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->byte_field_put(offset, (jbyte)*((jint*)&val));
        break;

      case T_BOOLEAN:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->bool_field_put(offset, (jboolean)*((jint*)&val));
        break;

      default:
        ShouldNotReachHere();
    }
    svIndex++;
  }
  return svIndex;
}

// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
    if (obj.is_null()) {
      continue;
    }

    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
      if (!mon_info->owner_is_scalar_replaced()) {
        Handle obj(thread, mon_info->owner());
        markOop mark = obj->mark();
        if (UseBiasedLocking && mark->has_bias_pattern()) {
          // New allocated objects may have the mark set to anonymously biased.
          // Also the deoptimized method may have called methods with synchronization
          // where the thread-local object is bias locked to the current thread.
          assert(mark->is_biased_anonymously() ||
                 mark->biased_locker() == thread, "should be locked to current thread");
          // Reset mark word to unbiased prototype.
          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
          obj->set_mark(unbiased_prototype);
        }
        BasicLock* lock = mon_info->lock();
        ObjectSynchronizer::slow_enter(obj, lock, thread);
        assert(mon_info->owner()->is_locked(), "object must be locked now");
      }
    }
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
    k->print_value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (obj.is_null()) {
      tty->print(" allocation failed");
    } else {
      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    }
    tty->cr();

    if (Verbose && !obj.is_null()) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;
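
  // Note: the pointer subtraction above makes frame_size a count of stack
  // words, not bytes.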

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, p2i(array));
  }
#endif // PRODUCT

  return array;
}

#if COMPILER2_OR_JVMCI
void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
  // Reallocation of some scalar replaced objects failed. Record
  // that we need to pop all the interpreter frames for the
  // deoptimized compiled frame.
  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
  thread->set_frames_to_pop_failed_realloc(array->frames());
  // Unlock all monitors here otherwise the interpreter will see a
  // mix of locked and unlocked monitors (because of failed
  // reallocations of synchronized objects) and be confused.
  for (int i = 0; i < array->frames(); i++) {
    MonitorChunk* monitors = array->element(i)->monitors();
    if (monitors != NULL) {
      for (int j = 0; j < monitors->number_of_monitors(); j++) {
        BasicObjectLock* src = monitors->at(j);
        if (src->obj() != NULL) {
          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
        }
      }
      array->element(i)->free_monitors(thread);
#ifdef ASSERT
      array->element(i)->set_removed_monitors();
#endif
    }
  }
}
#endif

static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  Thread* thread = Thread::current();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(thread, mon_info->owner()));
    }
  }
}
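
// Biases are revoked before a frame is deoptimized so that its monitors can
// be migrated into the interpreter frames being built (see the "ensure we can
// migrate them" note in uncommon_trap_inner below); presumably a biased mark
// word has no displaced header for the interpreter's BasicObjectLocks to adopt.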


void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}


void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}


void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(reason, Action_none, Bytecodes::_illegal);

  if (LogCompilation && xtty != NULL) {
    CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
    assert(cm != NULL, "only compiled methods can deopt");

    ttyLocker ttyl;
    xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
    cm->log_identity(xtty);
    xtty->end_head();
    for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
      xtty->begin_elem("jvms bci='%d'", sd->bci());
      xtty->method(sd->method());
      xtty->end_elem();
      if (sd->is_top()) break;
    }
    xtty->tail("deoptimized");
  }

  // Patch the compiled method so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  deoptimize(thread, fr, map, Reason_constraint);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize the frame which is already patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr, reason);

}

#if INCLUDE_JVMCI
address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
  // there is no exception handler for this pc => deoptimize
  cm->make_not_entrant();

  // Use Deoptimization::deoptimize for all of its side-effects:
  // revoking biases of monitors, gathering traps statistics, logging...
  // it also patches the return pc but we do not care about that
  // since we return a continuation to the deopt_blob below.
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
  Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);

  MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
  if (trap_mdo != NULL) {
    trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
  }

  return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
#endif

void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "can only deoptimize other thread at a safepoint");
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map, reason);
}


void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
  if (thread == Thread::current()) {
    Deoptimization::deoptimize_frame_internal(thread, id, reason);
  } else {
    // Deoptimizing a frame that belongs to another thread must be done at a
    // safepoint, so hand the request to the VM thread as a VM operation.
    VM_DeoptimizeFrame deopt(thread, id, reason);
    VMThread::execute(&deopt);
  }
}

void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  deoptimize_frame(thread, id, Reason_constraint);
}

// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END
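
// get_method_data is used by the trap bookkeeping below; building the MDO
// lazily here may fail under memory pressure, which is tolerated (the OOME is
// cleared and we simply proceed without trap history for this method).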
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m()->method_data();
  }
  return mdo;
}

#if COMPILER2_OR_JVMCI
void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
  // in case of an unresolved klass entry, load the class.
  if (constant_pool->tag_at(index).is_unresolved_klass()) {
    Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
    return;
  }

  if (!constant_pool->tag_at(index).is_symbol()) return;

  Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
  Symbol* symbol = constant_pool->symbol_at(index);

  // class name?
  if (symbol->byte_at(0) != '(') {
    Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
    return;
  }

  // then it must be a signature!
  ResourceMark rm(THREAD);
  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
    if (ss.is_object()) {
      Symbol* class_name = ss.as_symbol(CHECK);
      Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
    }
  }
}


void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
  EXCEPTION_MARK;
  load_class_by_index(constant_pool, index, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Exception happened during class loading. We ignore the exception here,
    // since it will be rethrown anyway: the current activation is about to be
    // deoptimized and the interpreter will re-execute the bytecode.
    CLEAR_PENDING_EXCEPTION;
    // Class loading called java code which may have caused a stack
    // overflow. If the exception was thrown right before the return
    // to the runtime the stack is no longer guarded. Reguard the
    // stack; otherwise, if we return to the uncommon trap blob and the
    // stack bang causes a stack overflow, we crash.
    assert(THREAD->is_Java_thread(), "only a java thread can be here");
    JavaThread* thread = (JavaThread*)THREAD;
    bool guard_pages_enabled = thread->stack_guards_enabled();
    if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
    assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
  }
}

JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  HandleMark hm;

  // uncommon_trap() is called at the beginning of the uncommon trap
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  // We need to update the map if we have biased locking.
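  // (revoke_biases_of_monitors() below walks the compiled frame's monitors
  // through a vframe, which requires a register map that tracks register
  // values.)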
#if INCLUDE_JVMCI
  // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
  RegisterMap reg_map(thread, true);
#else
  RegisterMap reg_map(thread, UseBiasedLocking);
#endif
  frame stub_frame = thread->last_frame();
  frame fr = stub_frame.sender(&reg_map);
  // Make sure the calling nmethod is not getting deoptimized and removed
  // before we are done with it.
  nmethodLocker nl(fr.pc());

  // Log a message
  Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
              trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());

  {
    ResourceMark rm;

    // Revoke biases of any monitors in the frame to ensure we can migrate them
    revoke_biases_of_monitors(thread, fr, &reg_map);

    DeoptReason reason = trap_request_reason(trap_request);
    DeoptAction action = trap_request_action(trap_request);
#if INCLUDE_JVMCI
    int debug_id = trap_request_debug_id(trap_request);
#endif
    jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1

    vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
    compiledVFrame* cvf = compiledVFrame::cast(vf);

    CompiledMethod* nm = cvf->code();

    ScopeDesc* trap_scope = cvf->scope();

    if (TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
#if INCLUDE_JVMCI
          , debug_id
#endif
          );
    }

    methodHandle trap_method = trap_scope->method();
    int trap_bci = trap_scope->bci();
#if INCLUDE_JVMCI
    oop speculation = thread->pending_failed_speculation();
    if (nm->is_compiled_by_jvmci()) {
      if (speculation != NULL) {
        oop speculation_log = nm->as_nmethod()->speculation_log();
        if (speculation_log != NULL) {
          if (TraceDeoptimization || TraceUncollectedSpeculations) {
            if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
              tty->print_cr("A speculation that was not collected by the compiler is being overwritten");
            }
          }
          if (TraceDeoptimization) {
            tty->print_cr("Saving speculation to speculation log");
          }
          HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation);
        } else {
          if (TraceDeoptimization) {
            tty->print_cr("Speculation present but no speculation log");
          }
        }
        thread->set_pending_failed_speculation(NULL);
      } else {
        if (TraceDeoptimization) {
          tty->print_cr("No speculation");
        }
      }
    } else {
      assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
    }

    if (trap_bci == SynchronizationEntryBCI) {
      trap_bci = 0;
      thread->set_pending_monitorenter(true);
    }

    if (reason == Deoptimization::Reason_transfer_to_interpreter) {
      thread->set_pending_transfer_to_interpreter(true);
    }
#endif

    Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
    // Record this event in the histogram.
    gather_statistics(reason, action, trap_bc);

    // Ensure that we can record deopt. history:
    // Need MDO to record RTM code generation state.
    bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );

    methodHandle profiled_method;
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      profiled_method = nm->method();
    } else {
      profiled_method = trap_method;
    }
#else
    profiled_method = trap_method;
#endif

    MethodData* trap_mdo =
      get_method_data(thread, profiled_method, create_if_missing);

    // Log a message
    Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
                              trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
                              trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());

    // Print a bunch of diagnostics, if requested.
    if (TraceDeoptimization || LogCompilation) {
      ResourceMark rm;
      ttyLocker ttyl;
      char buf[100];
      if (xtty != NULL) {
        xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
                         os::current_thread_id(),
                         format_trap_request(buf, sizeof(buf), trap_request));
        nm->log_identity(xtty);
      }
      Symbol* class_name = NULL;
      bool unresolved = false;
      if (unloaded_class_index >= 0) {
        constantPoolHandle constants (THREAD, trap_method->constants());
        if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
          class_name = constants->klass_name_at(unloaded_class_index);
          unresolved = true;
          if (xtty != NULL)
            xtty->print(" unresolved='1'");
        } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
          class_name = constants->symbol_at(unloaded_class_index);
        }
        if (xtty != NULL)
          xtty->name(class_name);
      }
      if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
        // Dump the relevant MDO state.
        // This is the deopt count for the current reason, any previous
        // reasons or recompiles seen at this point.
        int dcnt = trap_mdo->trap_count(reason);
        if (dcnt != 0)
          xtty->print(" count='%d'", dcnt);
        ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
        int dos = (pdata == NULL)? 0: pdata->trap_state();
        if (dos != 0) {
          xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
          if (trap_state_is_recompiled(dos)) {
            int recnt2 = trap_mdo->overflow_recompile_count();
            if (recnt2 != 0)
              xtty->print(" recompiles2='%d'", recnt2);
          }
        }
      }
      if (xtty != NULL) {
        xtty->stamp();
        xtty->end_head();
      }
      if (TraceDeoptimization) {  // make noise on the tty
        tty->print("Uncommon trap occurred in");
        nm->method()->print_short_name(tty);
        tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
#if INCLUDE_JVMCI
        if (nm->is_nmethod()) {
          char* installed_code_name = nm->as_nmethod()->jvmci_installed_code_name(buf, sizeof(buf));
          if (installed_code_name != NULL) {
            tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
          }
        }
#endif
        tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
                   p2i(fr.pc()),
                   os::current_thread_id(),
                   trap_reason_name(reason),
                   trap_action_name(action),
                   unloaded_class_index
#if INCLUDE_JVMCI
                   , debug_id
#endif
                   );
        if (class_name != NULL) {
          tty->print(unresolved ?
" unresolved class: " : " symbol: "); 1696 class_name->print_symbol_on(tty); 1697 } 1698 tty->cr(); 1699 } 1700 if (xtty != NULL) { 1701 // Log the precise location of the trap. 1702 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 1703 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1704 xtty->method(sd->method()); 1705 xtty->end_elem(); 1706 if (sd->is_top()) break; 1707 } 1708 xtty->tail("uncommon_trap"); 1709 } 1710 } 1711 // (End diagnostic printout.) 1712 1713 // Load class if necessary 1714 if (unloaded_class_index >= 0) { 1715 constantPoolHandle constants(THREAD, trap_method->constants()); 1716 load_class_by_index(constants, unloaded_class_index); 1717 } 1718 1719 // Flush the nmethod if necessary and desirable. 1720 // 1721 // We need to avoid situations where we are re-flushing the nmethod 1722 // because of a hot deoptimization site. Repeated flushes at the same 1723 // point need to be detected by the compiler and avoided. If the compiler 1724 // cannot avoid them (or has a bug and "refuses" to avoid them), this 1725 // module must take measures to avoid an infinite cycle of recompilation 1726 // and deoptimization. There are several such measures: 1727 // 1728 // 1. If a recompilation is ordered a second time at some site X 1729 // and for the same reason R, the action is adjusted to 'reinterpret', 1730 // to give the interpreter time to exercise the method more thoroughly. 1731 // If this happens, the method's overflow_recompile_count is incremented. 1732 // 1733 // 2. If the compiler fails to reduce the deoptimization rate, then 1734 // the method's overflow_recompile_count will begin to exceed the set 1735 // limit PerBytecodeRecompilationCutoff. If this happens, the action 1736 // is adjusted to 'make_not_compilable', and the method is abandoned 1737 // to the interpreter. This is a performance hit for hot methods, 1738 // but is better than a disastrous infinite cycle of recompilations. 1739 // (Actually, only the method containing the site X is abandoned.) 1740 // 1741 // 3. In parallel with the previous measures, if the total number of 1742 // recompilations of a method exceeds the much larger set limit 1743 // PerMethodRecompilationCutoff, the method is abandoned. 1744 // This should only happen if the method is very large and has 1745 // many "lukewarm" deoptimizations. The code which enforces this 1746 // limit is elsewhere (class nmethod, class Method). 1747 // 1748 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance 1749 // to recompile at each bytecode independently of the per-BCI cutoff. 1750 // 1751 // The decision to update code is up to the compiler, and is encoded 1752 // in the Action_xxx code. If the compiler requests Action_none 1753 // no trap state is changed, no compiled code is changed, and the 1754 // computation suffers along in the interpreter. 1755 // 1756 // The other action codes specify various tactics for decompilation 1757 // and recompilation. Action_maybe_recompile is the loosest, and 1758 // allows the compiled code to stay around until enough traps are seen, 1759 // and until the compiler gets around to recompiling the trapping method. 1760 // 1761 // The other actions cause immediate removal of the present code. 1762 1763 // Traps caused by injected profile shouldn't pollute trap counts. 
    bool injected_profile_trap = trap_method->has_injected_profile() &&
                                 (reason == Reason_intrinsic || reason == Reason_unreached);

    bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
    bool make_not_entrant = false;
    bool make_not_compilable = false;
    bool reprofile = false;
    switch (action) {
    case Action_none:
      // Keep the old code.
      update_trap_state = false;
      break;
    case Action_maybe_recompile:
      // We do not need to invalidate the present code, but we can
      // initiate another compilation: start the compiler without
      // (necessarily) invalidating the nmethod.
      // The system will tolerate the old code, but new code should be
      // generated when possible.
      break;
    case Action_reinterpret:
      // Go back into the interpreter for a while, and then consider
      // recompiling from scratch.
      make_not_entrant = true;
      // Reset invocation counter for outermost method.
      // This will allow the interpreter to exercise the bytecodes
      // for a while before recompiling.
      // By contrast, Action_make_not_entrant is immediate.
      //
      // Note that the compiler will track null_check, null_assert,
      // range_check, and class_check events and log them as if they
      // had been traps taken from compiled code. This will update
      // the MDO trap history so that the next compilation will
      // properly detect hot trap sites.
      reprofile = true;
      break;
    case Action_make_not_entrant:
      // Request immediate recompilation, and get rid of the old code.
      // Make them not entrant, so next time they are called they get
      // recompiled. Unloaded classes are loaded now so recompile before next
      // time they are called. Same for uninitialized. The interpreter will
      // link the missing class, if any.
      make_not_entrant = true;
      break;
    case Action_make_not_compilable:
      // Give up on compiling this method at all.
      make_not_entrant = true;
      make_not_compilable = true;
      break;
    default:
      ShouldNotReachHere();
    }

    // Setting +ProfileTraps fixes the following, on all platforms:
    // 4852688: ProfileInterpreter is off by default for ia64. The result is
    // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
    // recompile relies on a MethodData* to record heroic opt failures.

    // Whether the interpreter is producing MDO data or not, we also need
    // to use the MDO to detect hot deoptimization points and control
    // aggressive optimization.
    bool inc_recompile_count = false;
    ProfileData* pdata = NULL;
    if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
      assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
      uint this_trap_count = 0;
      bool maybe_prior_trap = false;
      bool maybe_prior_recompile = false;
      pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
#if INCLUDE_JVMCI
                                       nm->is_compiled_by_jvmci() && nm->is_osr_method(),
#endif
                                       nm->method(),
                                       //outputs:
                                       this_trap_count,
                                       maybe_prior_trap,
                                       maybe_prior_recompile);
      // Because the interpreter also counts null, div0, range, and class
      // checks, these traps from compiled code are double-counted.
      // This is harmless; it just means that the PerXTrapLimit values
      // are in effect a little smaller than they look.

      DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
      if (per_bc_reason != Reason_none) {
        // Now take action based on the partially known per-BCI history.
        if (maybe_prior_trap
            && this_trap_count >= (uint)PerBytecodeTrapLimit) {
          // If there are too many traps at this BCI, force a recompile.
          // This will allow the compiler to see the limit overflow, and
          // take corrective action, if possible. The compiler generally
          // does not use the exact PerBytecodeTrapLimit value, but instead
          // changes its tactics if it sees any traps at all. This provides
          // a little hysteresis, delaying a recompile until a trap happens
          // several times.
          //
          // Actually, since there is only one bit of counter per BCI,
          // the possible per-BCI counts are {0,1,(per-method count)}.
          // This produces accurate results if in fact there is only
          // one hot trap site, but begins to get fuzzy if there are
          // many sites. For example, if there are ten sites each
          // trapping two or more times, they each get the blame for
          // all of their traps.
          make_not_entrant = true;
        }

        // Detect repeated recompilation at the same BCI, and enforce a limit.
        if (make_not_entrant && maybe_prior_recompile) {
          // More than one recompile at this point.
          inc_recompile_count = maybe_prior_trap;
        }
      } else {
        // For reasons which are not recorded per-bytecode, we simply
        // force recompiles unconditionally.
        // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
        make_not_entrant = true;
      }

      // Go back to the compiler if there are too many traps in this method.
      if (this_trap_count >= per_method_trap_limit(reason)) {
        // If there are too many traps in this method, force a recompile.
        // This will allow the compiler to see the limit overflow, and
        // take corrective action, if possible.
        // (This condition is an unlikely backstop only, because the
        // PerBytecodeTrapLimit is more likely to take effect first,
        // if it is applicable.)
        make_not_entrant = true;
      }

      // Here's more hysteresis: If there has been a recompile at
      // this trap point already, run the method in the interpreter
      // for a while to exercise it more thoroughly.
      if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
        reprofile = true;
      }
    }

    // Take requested actions on the method:

    // Recompile
    if (make_not_entrant) {
      if (!nm->make_not_entrant()) {
        return; // the call did not change nmethod's state
      }

      if (pdata != NULL) {
        // Record the recompilation event, if any.
        int tstate0 = pdata->trap_state();
        int tstate1 = trap_state_set_recompiled(tstate0, true);
        if (tstate1 != tstate0)
          pdata->set_trap_state(tstate1);
      }

#if INCLUDE_RTM_OPT
      // Restart collecting RTM locking abort statistics if the method
      // is recompiled for a reason other than RTM state change.
      // Assume that in new recompiled code the statistics could be different,
      // for example, due to different inlining.
      if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
          UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
        trap_mdo->atomic_set_rtm_state(ProfileRTM);
      }
#endif
      // For code aging we count traps separately here, using make_not_entrant()
      // as a guard against simultaneous deopts in multiple threads.
      if (reason == Reason_tenured && trap_mdo != NULL) {
        trap_mdo->inc_tenure_traps();
      }
    }

    if (inc_recompile_count) {
      trap_mdo->inc_overflow_recompile_count();
      if ((uint)trap_mdo->overflow_recompile_count() >
          (uint)PerBytecodeRecompilationCutoff) {
        // Give up on the method containing the bad BCI.
        if (trap_method() == nm->method()) {
          make_not_compilable = true;
        } else {
          trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
          // But give grace to the enclosing nm->method().
        }
      }
    }

    // Reprofile
    if (reprofile) {
      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
    }

    // Give up compiling
    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
      assert(make_not_entrant, "consistent");
      nm->method()->set_not_compilable(CompLevel_full_optimization);
    }

  } // Free marked resources

}
JRT_END

ProfileData*
Deoptimization::query_update_method_data(MethodData* trap_mdo,
                                         int trap_bci,
                                         Deoptimization::DeoptReason reason,
                                         bool update_total_trap_count,
#if INCLUDE_JVMCI
                                         bool is_osr,
#endif
                                         Method* compiled_method,
                                         //outputs:
                                         uint& ret_this_trap_count,
                                         bool& ret_maybe_prior_trap,
                                         bool& ret_maybe_prior_recompile) {
  bool maybe_prior_trap = false;
  bool maybe_prior_recompile = false;
  uint this_trap_count = 0;
  if (update_total_trap_count) {
    uint idx = reason;
#if INCLUDE_JVMCI
    if (is_osr) {
      idx += Reason_LIMIT;
    }
#endif
    uint prior_trap_count = trap_mdo->trap_count(idx);
    this_trap_count = trap_mdo->inc_trap_count(idx);

    // If the runtime cannot find a place to store trap history,
    // it is estimated based on the general condition of the method.
    // If the method has ever been recompiled, or has ever incurred
    // a trap with the present reason, then this BCI is assumed
    // (pessimistically) to be the culprit.
    maybe_prior_trap = (prior_trap_count != 0);
    maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
  }
  ProfileData* pdata = NULL;


  // For reasons which are recorded per bytecode, we check per-BCI data.
  DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
  if (per_bc_reason != Reason_none) {
    // Find the profile data for this BCI. If there isn't one,
    // try to allocate one from the MDO's set of spares.
    // This will let us detect a repeated trap at this point.
    pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ?
                                           compiled_method : NULL);

    if (pdata != NULL) {
      if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
        if (LogCompilation && xtty != NULL) {
          ttyLocker ttyl;
          // no more room for speculative traps in this MDO
          xtty->elem("speculative_traps_oom");
        }
      }
      // Query the trap state of this profile datum.
      int tstate0 = pdata->trap_state();
      if (!trap_state_has_reason(tstate0, per_bc_reason))
        maybe_prior_trap = false;
      if (!trap_state_is_recompiled(tstate0))
        maybe_prior_recompile = false;

      // Update the trap state of this profile datum.
      int tstate1 = tstate0;
      // Record the reason.
      tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
      // Store the updated state on the MDO, for next time.
      if (tstate1 != tstate0)
        pdata->set_trap_state(tstate1);
    } else {
      if (LogCompilation && xtty != NULL) {
        ttyLocker ttyl;
        // Missing MDP? Leave a small complaint in the log.
        xtty->elem("missing_mdp bci='%d'", trap_bci);
      }
    }
  }

  // Return results:
  ret_this_trap_count = this_trap_count;
  ret_maybe_prior_trap = maybe_prior_trap;
  ret_maybe_prior_recompile = maybe_prior_recompile;
  return pdata;
}

void
Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  ResourceMark rm;
  // Ignored outputs:
  uint ignore_this_trap_count;
  bool ignore_maybe_prior_trap;
  bool ignore_maybe_prior_recompile;
  assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
  // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
  bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true);
  query_update_method_data(trap_mdo, trap_bci,
                           (DeoptReason)reason,
                           update_total_counts,
#if INCLUDE_JVMCI
                           false,
#endif
                           NULL,
                           ignore_this_trap_count,
                           ignore_maybe_prior_trap,
                           ignore_maybe_prior_recompile);
}

Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
  if (TraceDeoptimization) {
    tty->print("Uncommon trap ");
  }
  // Still in Java, no safepoints.
  {
    // This enters the VM and may safepoint.
    uncommon_trap_inner(thread, trap_request);
  }
  return fetch_unroll_info_helper(thread, exec_mode);
}

// Local derived constants.
// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;

//---------------------------trap_state_reason---------------------------------
Deoptimization::DeoptReason
Deoptimization::trap_state_reason(int trap_state) {
  // This assert provides the link between the width of DataLayout::trap_bits
  // and the encoding of "recorded" reasons. It ensures there are enough
  // bits to store all needed reasons in the per-BCI MDO profile.
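  //
  // A trap_state packs [ recompile bit | reason bits ]. For example, a
  // state whose reason bits equal Reason_null_check and whose
  // DS_RECOMPILE_BIT is set is rendered by format_trap_state() below as
  // "null_check recompiled"; reason bits of all ones denote Reason_many.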
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return Reason_many;
  } else {
    assert((int)Reason_none == 0, "state=0 => Reason_none");
    return (DeoptReason)trap_state;
  }
}
//-------------------------trap_state_has_reason-------------------------------
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return -1;  // true, unspecifically (bottom of state lattice)
  } else if (trap_state == reason) {
    return 1;   // true, definitely
  } else if (trap_state == 0) {
    return 0;   // false, definitely (top of state lattice)
  } else {
    return 0;   // false, definitely
  }
}
//-------------------------trap_state_add_reason-------------------------------
int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return trap_state + recompile_bit;      // already at state lattice bottom
  } else if (trap_state == reason) {
    return trap_state + recompile_bit;      // the condition is already true
  } else if (trap_state == 0) {
    return reason + recompile_bit;          // no condition has yet been true
  } else {
    return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
  }
}
//-----------------------trap_state_is_recompiled------------------------------
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return (trap_state & DS_RECOMPILE_BIT) != 0;
}
//-----------------------trap_state_set_recompiled-----------------------------
int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
  if (z)  return trap_state |  DS_RECOMPILE_BIT;
  else    return trap_state & ~DS_RECOMPILE_BIT;
}
//---------------------------format_trap_state---------------------------------
// This is used for debugging and diagnostics, including LogFile output.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  assert(buflen > 0, "sanity");
  DeoptReason reason      = trap_state_reason(trap_state);
  bool        recomp_flag = trap_state_is_recompiled(trap_state);
  // Re-encode the state from its decoded components.
  int decoded_state = 0;
  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
    decoded_state = trap_state_add_reason(decoded_state, reason);
  if (recomp_flag)
    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  // If the state re-encodes properly, format it symbolically.
  // Because this routine is used for debugging and diagnostics,
  // be robust even if the state is a strange value.
  size_t len;
  if (decoded_state != trap_state) {
    // Random buggy state that doesn't decode??
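    // Fall back to the raw numeric value, so the state is still visible
    // in the log even though it cannot be decoded.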
    len = jio_snprintf(buf, buflen, "#%d", trap_state);
  } else {
    len = jio_snprintf(buf, buflen, "%s%s",
                       trap_reason_name(reason),
                       recomp_flag ? " recompiled" : "");
  }
  return buf;
}


//--------------------------------statics--------------------------------------
const char* Deoptimization::_trap_reason_name[] = {
  // Note: Keep this in sync with enum DeoptReason.
  "none",
  "null_check",
  "null_assert" JVMCI_ONLY("_or_unreached0"),
  "range_check",
  "class_check",
  "array_check",
  "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
  "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
  "unloaded",
  "uninitialized",
  "unreached",
  "unhandled",
  "constraint",
  "div0_check",
  "age",
  "predicate",
  "loop_limit_check",
  "speculate_class_check",
  "speculate_null_check",
  "speculate_null_assert",
  "rtm_state_change",
  "unstable_if",
  "unstable_fused_if",
#if INCLUDE_JVMCI
  "aliasing",
  "transfer_to_interpreter",
  "not_compiled_exception_handler",
  "unresolved",
  "jsr_mismatch",
#endif
  "tenured"
};
const char* Deoptimization::_trap_action_name[] = {
  // Note: Keep this in sync with enum DeoptAction.
  "none",
  "maybe_recompile",
  "reinterpret",
  "make_not_entrant",
  "make_not_compilable"
};

const char* Deoptimization::trap_reason_name(int reason) {
  // Check that every reason has a name
  STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);

  if (reason == Reason_many)  return "many";
  if ((uint)reason < Reason_LIMIT)
    return _trap_reason_name[reason];
  static char buf[20];
  sprintf(buf, "reason%d", reason);
  return buf;
}
const char* Deoptimization::trap_action_name(int action) {
  // Check that every action has a name
  STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);

  if ((uint)action < Action_LIMIT)
    return _trap_action_name[action];
  static char buf[20];
  sprintf(buf, "action%d", action);
  return buf;
}

// This is used for debugging and diagnostics, including LogFile output.
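// A request with no unloaded class formats as, e.g.,
//   reason='null_check' action='make_not_entrant'
// with index='...' (and, in JVMCI builds, debug_id='...') appended when
// present.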
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                int trap_request) {
  jint unloaded_class_index = trap_request_index(trap_request);
  const char* reason = trap_reason_name(trap_request_reason(trap_request));
  const char* action = trap_action_name(trap_request_action(trap_request));
#if INCLUDE_JVMCI
  int debug_id = trap_request_debug_id(trap_request);
#endif
  size_t len;
  if (unloaded_class_index < 0) {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
                       reason, action
#if INCLUDE_JVMCI
                       ,debug_id
#endif
                       );
  } else {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
                       reason, action, unloaded_class_index
#if INCLUDE_JVMCI
                       ,debug_id
#endif
                       );
  }
  return buf;
}

juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
    [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};

enum {
  LSB_BITS = 8,
  LSB_MASK = right_n_bits(LSB_BITS)
};

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  assert(action >= 0 && action < Action_LIMIT, "oob");
  _deoptimization_hist[Reason_none][0][0] += 1;  // total
  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  juint* cases = _deoptimization_hist[reason][1+action];
  juint* bc_counter_addr = NULL;
  juint  bc_counter      = 0;
  // Look for an unused counter, or an exact match to this BC.
  if (bc != Bytecodes::_illegal) {
    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
      juint* counter_addr = &cases[bc_case];
      juint  counter = *counter_addr;
      if ((counter == 0 && bc_counter_addr == NULL)
          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
        // this counter is either free or is already devoted to this BC
        bc_counter_addr = counter_addr;
        bc_counter = counter | bc;
      }
    }
  }
  if (bc_counter_addr == NULL) {
    // Overflow, or no given bytecode.
    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  }
  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
}

jint Deoptimization::total_deoptimization_count() {
  return _deoptimization_hist[Reason_none][0][0];
}

jint Deoptimization::deoptimization_count(DeoptReason reason) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  return _deoptimization_hist[reason][0][0];
}

void Deoptimization::print_statistics() {
  juint total = total_deoptimization_count();
  juint account = total;
  if (total != 0) {
    ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
    tty->print_cr("Deoptimization traps recorded:");
    #define PRINT_STAT_LINE(name, r) \
      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
    PRINT_STAT_LINE("total", total);
    // For each non-zero entry in the histogram, print the reason,
    // the action, and (if specifically known) the type of bytecode.
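    // Each histogram counter packs the bytecode into its low LSB_BITS bits
    // and the trap count into the remaining high bits, so a raw value of
    // ((5 << LSB_BITS) | Bytecodes::_ifeq) decodes as five traps at an
    // ifeq bytecode (see gather_statistics() above).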
    for (int reason = 0; reason < Reason_LIMIT; reason++) {
      for (int action = 0; action < Action_LIMIT; action++) {
        juint* cases = _deoptimization_hist[reason][1+action];
        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
          juint counter = cases[bc_case];
          if (counter != 0) {
            char name[1*K];
            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
            // The last slot is the overflow counter; its LSB was cleared
            // by gather_statistics(), so a zero bc there means "no bytecode".
            if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
              bc = Bytecodes::_illegal;
            sprintf(name, "%s/%s/%s",
                    trap_reason_name(reason),
                    trap_action_name(action),
                    Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
            juint r = counter >> LSB_BITS;
            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
            account -= r;
          }
        }
      }
    }
    if (account != 0) {
      PRINT_STAT_LINE("unaccounted", account);
    }
    #undef PRINT_STAT_LINE
    if (xtty != NULL)  xtty->tail("statistics");
  }
}
#else // COMPILER2_OR_JVMCI


// Stubs for C1-only system.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}

const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}

void Deoptimization::print_statistics() {
  // no output
}

void
Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  // no update
}

int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}

const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}

#endif // COMPILER2_OR_JVMCI