1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/nmethod.hpp" 30 #include "code/pcDesc.hpp" 31 #include "code/scopeDesc.hpp" 32 #include "interpreter/bytecode.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "interpreter/oopMapCache.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/oopFactory.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/method.hpp" 39 #include "oops/objArrayOop.inline.hpp" 40 #include "oops/typeArrayOop.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "oops/fieldStreams.hpp" 43 #include "oops/verifyOopClosure.hpp" 44 #include "prims/jvmtiThreadState.hpp" 45 #include "runtime/biasedLocking.hpp" 46 #include "runtime/compilationPolicy.hpp" 47 #include "runtime/deoptimization.hpp" 48 #include "runtime/interfaceSupport.hpp" 49 #include "runtime/sharedRuntime.hpp" 50 #include "runtime/signature.hpp" 51 #include "runtime/stubRoutines.hpp" 52 #include "runtime/thread.hpp" 53 #include "runtime/vframe.hpp" 54 #include "runtime/vframeArray.hpp" 55 #include "runtime/vframe_hp.hpp" 56 #include "utilities/events.hpp" 57 #include "utilities/xmlstream.hpp" 58 59 #if INCLUDE_JVMCI 60 #include "jvmci/jvmciRuntime.hpp" 61 #include "jvmci/jvmciJavaClasses.hpp" 62 #endif 63 64 65 bool DeoptimizationMarker::_is_active = false; 66 67 Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame, 68 int caller_adjustment, 69 int caller_actual_parameters, 70 int number_of_frames, 71 intptr_t* frame_sizes, 72 address* frame_pcs, 73 BasicType return_type, 74 int exec_mode) { 75 _size_of_deoptimized_frame = size_of_deoptimized_frame; 76 _caller_adjustment = caller_adjustment; 77 _caller_actual_parameters = caller_actual_parameters; 78 _number_of_frames = number_of_frames; 79 _frame_sizes = frame_sizes; 80 _frame_pcs = frame_pcs; 81 _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler); 82 _return_type = return_type; 83 _initial_info = 0; 84 // PD (x86 only) 85 _counter_temp = 0; 86 _unpack_kind = exec_mode; 87 _sender_sp_temp = 0; 88 89 _total_frame_sizes = size_of_frames(); 90 assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode"); 91 } 92 93 94 Deoptimization::UnrollBlock::~UnrollBlock() { 95 FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes); 96 FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs); 97 
FREE_C_HEAP_ARRAY(intptr_t, _register_block); 98 } 99 100 101 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const { 102 assert(register_number < RegisterMap::reg_count, "checking register number"); 103 return &_register_block[register_number * 2]; 104 } 105 106 107 108 int Deoptimization::UnrollBlock::size_of_frames() const { 109 // Acount first for the adjustment of the initial frame 110 int result = _caller_adjustment; 111 for (int index = 0; index < number_of_frames(); index++) { 112 result += frame_sizes()[index]; 113 } 114 return result; 115 } 116 117 118 void Deoptimization::UnrollBlock::print() { 119 ttyLocker ttyl; 120 tty->print_cr("UnrollBlock"); 121 tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame); 122 tty->print( " frame_sizes: "); 123 for (int index = 0; index < number_of_frames(); index++) { 124 tty->print(INTX_FORMAT " ", frame_sizes()[index]); 125 } 126 tty->cr(); 127 } 128 129 130 // In order to make fetch_unroll_info work properly with escape 131 // analysis, The method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and 132 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation 133 // of previously eliminated objects occurs in realloc_objects, which is 134 // called from the method fetch_unroll_info_helper below. 135 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode)) 136 // It is actually ok to allocate handles in a leaf method. It causes no safepoints, 137 // but makes the entry a little slower. There is however a little dance we have to 138 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro 139 140 // fetch_unroll_info() is called at the beginning of the deoptimization 141 // handler. Note this fact before we start generating temporary frames 142 // that can confuse an asynchronous stack walker. This counter is 143 // decremented at the end of unpack_frames(). 144 if (TraceDeoptimization) { 145 tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread)); 146 } 147 thread->inc_in_deopt_handler(); 148 149 return fetch_unroll_info_helper(thread, exec_mode); 150 JRT_END 151 152 153 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap) 154 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) { 155 156 // Note: there is a safepoint safety issue here. No matter whether we enter 157 // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once 158 // the vframeArray is created. 159 // 160 161 // Allocate our special deoptimization ResourceMark 162 DeoptResourceMark* dmark = new DeoptResourceMark(thread); 163 assert(thread->deopt_mark() == NULL, "Pending deopt!"); 164 thread->set_deopt_mark(dmark); 165 166 frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect 167 RegisterMap map(thread, true); 168 RegisterMap dummy_map(thread, false); 169 // Now get the deoptee with a valid map 170 frame deoptee = stub_frame.sender(&map); 171 // Set the deoptee nmethod 172 assert(thread->deopt_compiled_method() == NULL, "Pending deopt!"); 173 CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null(); 174 thread->set_deopt_compiled_method(cm); 175 176 if (VerifyStack) { 177 thread->validate_frame_layout(); 178 } 179 180 // Create a growable array of VFrames where each VFrame represents an inlined 181 // Java frame. This storage is allocated with the usual system arena. 
182 assert(deoptee.is_compiled_frame(), "Wrong frame type"); 183 GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10); 184 vframe* vf = vframe::new_vframe(&deoptee, &map, thread); 185 while (!vf->is_top()) { 186 assert(vf->is_compiled_frame(), "Wrong frame type"); 187 chunk->push(compiledVFrame::cast(vf)); 188 vf = vf->sender(); 189 } 190 assert(vf->is_compiled_frame(), "Wrong frame type"); 191 chunk->push(compiledVFrame::cast(vf)); 192 193 bool realloc_failures = false; 194 195 #if defined(COMPILER2) || INCLUDE_JVMCI 196 // Reallocate the non-escaping objects and restore their fields. Then 197 // relock objects if synchronization on them was eliminated. 198 #ifndef INCLUDE_JVMCI 199 if (DoEscapeAnalysis || EliminateNestedLocks) { 200 if (EliminateAllocations) { 201 #endif // INCLUDE_JVMCI 202 assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames"); 203 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects(); 204 205 // The flag return_oop() indicates call sites which return oop 206 // in compiled code. Such sites include java method calls, 207 // runtime calls (for example, used to allocate new objects/arrays 208 // on slow code path) and any other calls generated in compiled code. 209 // It is not guaranteed that we can get such information here only 210 // by analyzing bytecode in deoptimized frames. This is why this flag 211 // is set during method compilation (see Compile::Process_OopMap_Node()). 212 // If the previous frame was popped or if we are dispatching an exception, 213 // we don't have an oop result. 214 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt); 215 Handle return_value; 216 if (save_oop_result) { 217 // Reallocation may trigger GC. If deoptimization happened on return from 218 // call which returns oop we need to save it since it is not in oopmap. 219 oop result = deoptee.saved_oop_result(&map); 220 assert(result == NULL || result->is_oop(), "must be oop"); 221 return_value = Handle(thread, result); 222 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer"); 223 if (TraceDeoptimization) { 224 ttyLocker ttyl; 225 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread)); 226 } 227 } 228 if (objects != NULL) { 229 JRT_BLOCK 230 realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD); 231 JRT_END 232 bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci(); 233 reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal); 234 #ifndef PRODUCT 235 if (TraceDeoptimization) { 236 ttyLocker ttyl; 237 tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread)); 238 print_objects(objects, realloc_failures); 239 } 240 #endif 241 } 242 if (save_oop_result) { 243 // Restore result. 
244 deoptee.set_saved_oop_result(&map, return_value()); 245 } 246 #ifndef INCLUDE_JVMCI 247 } 248 if (EliminateLocks) { 249 #endif // INCLUDE_JVMCI 250 #ifndef PRODUCT 251 bool first = true; 252 #endif 253 for (int i = 0; i < chunk->length(); i++) { 254 compiledVFrame* cvf = chunk->at(i); 255 assert (cvf->scope() != NULL,"expect only compiled java frames"); 256 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); 257 if (monitors->is_nonempty()) { 258 relock_objects(monitors, thread, realloc_failures); 259 #ifndef PRODUCT 260 if (PrintDeoptimizationDetails) { 261 ttyLocker ttyl; 262 for (int j = 0; j < monitors->length(); j++) { 263 MonitorInfo* mi = monitors->at(j); 264 if (mi->eliminated()) { 265 if (first) { 266 first = false; 267 tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread)); 268 } 269 if (mi->owner_is_scalar_replaced()) { 270 Klass* k = java_lang_Class::as_Klass(mi->owner_klass()); 271 tty->print_cr(" failed reallocation for klass %s", k->external_name()); 272 } else { 273 tty->print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner())); 274 } 275 } 276 } 277 } 278 #endif // !PRODUCT 279 } 280 } 281 #ifndef INCLUDE_JVMCI 282 } 283 } 284 #endif // INCLUDE_JVMCI 285 #endif // COMPILER2 || INCLUDE_JVMCI 286 287 ScopeDesc* trap_scope = chunk->at(0)->scope(); 288 Handle exceptionObject; 289 if (trap_scope->rethrow_exception()) { 290 if (PrintDeoptimizationDetails) { 291 tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci()); 292 } 293 GrowableArray<ScopeValue*>* expressions = trap_scope->expressions(); 294 guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw"); 295 ScopeValue* topOfStack = expressions->top(); 296 exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj(); 297 guarantee(exceptionObject() != NULL, "exception oop can not be null"); 298 } 299 300 // Ensure that no safepoint is taken after pointers have been stored 301 // in fields of rematerialized objects. If a safepoint occurs from here on 302 // out the java state residing in the vframeArray will be missed. 303 NoSafepointVerifier no_safepoint; 304 305 vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures); 306 #if defined(COMPILER2) || INCLUDE_JVMCI 307 if (realloc_failures) { 308 pop_frames_failed_reallocs(thread, array); 309 } 310 #endif 311 312 assert(thread->vframe_array_head() == NULL, "Pending deopt!"); 313 thread->set_vframe_array_head(array); 314 315 // Now that the vframeArray has been created if we have any deferred local writes 316 // added by jvmti then we can free up that structure as the data is now in the 317 // vframeArray 318 319 if (thread->deferred_locals() != NULL) { 320 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals(); 321 int i = 0; 322 do { 323 // Because of inlining we could have multiple vframes for a single frame 324 // and several of the vframes could have deferred writes. Find them all. 325 if (list->at(i)->id() == array->original().id()) { 326 jvmtiDeferredLocalVariableSet* dlv = list->at(i); 327 list->remove_at(i); 328 // individual jvmtiDeferredLocalVariableSet are CHeapObj's 329 delete dlv; 330 } else { 331 i++; 332 } 333 } while ( i < list->length() ); 334 if (list->length() == 0) { 335 thread->set_deferred_locals(NULL); 336 // free the list and elements back to C heap. 
337 delete list; 338 } 339 340 } 341 342 #ifndef SHARK 343 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info. 344 CodeBlob* cb = stub_frame.cb(); 345 // Verify we have the right vframeArray 346 assert(cb->frame_size() >= 0, "Unexpected frame size"); 347 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size(); 348 349 // If the deopt call site is a MethodHandle invoke call site we have 350 // to adjust the unpack_sp. 351 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null(); 352 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc())) 353 unpack_sp = deoptee.unextended_sp(); 354 355 #ifdef ASSERT 356 assert(cb->is_deoptimization_stub() || 357 cb->is_uncommon_trap_stub() || 358 strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 || 359 strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0, 360 "unexpected code blob: %s", cb->name()); 361 #endif 362 #else 363 intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp(); 364 #endif // !SHARK 365 366 // This is a guarantee instead of an assert because if vframe doesn't match 367 // we will unpack the wrong deoptimized frame and wind up in strange places 368 // where it will be very difficult to figure out what went wrong. Better 369 // to die an early death here than some very obscure death later when the 370 // trail is cold. 371 // Note: on ia64 this guarantee can be fooled by frames with no memory stack 372 // in that it will fail to detect a problem when there is one. This needs 373 // more work in tiger timeframe. 374 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack"); 375 376 int number_of_frames = array->frames(); 377 378 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost 379 // virtual activation, which is the reverse of the elements in the vframes array. 380 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler); 381 // +1 because we always have an interpreter return address for the final slot. 382 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler); 383 int popframe_extra_args = 0; 384 // Create an interpreter return address for the stub to use as its return 385 // address so the skeletal frames are perfectly walkable 386 frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0); 387 388 // PopFrame requires that the preserved incoming arguments from the recently-popped topmost 389 // activation be put back on the expression stack of the caller for reexecution 390 if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) { 391 popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words()); 392 } 393 394 // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized 395 // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather 396 // than simply use array->sender.pc(). This requires us to walk the current set of frames 397 // 398 frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame 399 deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller 400 401 // It's possible that the number of parameters at the call site is 402 // different than number of arguments in the callee when method 403 // handles are used. 
If the caller is interpreted get the real 404 // value so that the proper amount of space can be added to it's 405 // frame. 406 bool caller_was_method_handle = false; 407 if (deopt_sender.is_interpreted_frame()) { 408 methodHandle method = deopt_sender.interpreter_frame_method(); 409 Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci()); 410 if (cur.is_invokedynamic() || cur.is_invokehandle()) { 411 // Method handle invokes may involve fairly arbitrary chains of 412 // calls so it's impossible to know how much actual space the 413 // caller has for locals. 414 caller_was_method_handle = true; 415 } 416 } 417 418 // 419 // frame_sizes/frame_pcs[0] oldest frame (int or c2i) 420 // frame_sizes/frame_pcs[1] next oldest frame (int) 421 // frame_sizes/frame_pcs[n] youngest frame (int) 422 // 423 // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame 424 // owns the space for the return address to it's caller). Confusing ain't it. 425 // 426 // The vframe array can address vframes with indices running from 427 // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame. 428 // When we create the skeletal frames we need the oldest frame to be in the zero slot 429 // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk. 430 // so things look a little strange in this loop. 431 // 432 int callee_parameters = 0; 433 int callee_locals = 0; 434 for (int index = 0; index < array->frames(); index++ ) { 435 // frame[number_of_frames - 1 ] = on_stack_size(youngest) 436 // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest)) 437 // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest))) 438 frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters, 439 callee_locals, 440 index == 0, 441 popframe_extra_args); 442 // This pc doesn't have to be perfect just good enough to identify the frame 443 // as interpreted so the skeleton frame will be walkable 444 // The correct pc will be set when the skeleton frame is completely filled out 445 // The final pc we store in the loop is wrong and will be overwritten below 446 frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset; 447 448 callee_parameters = array->element(index)->method()->size_of_parameters(); 449 callee_locals = array->element(index)->method()->max_locals(); 450 popframe_extra_args = 0; 451 } 452 453 // Compute whether the root vframe returns a float or double value. 454 BasicType return_type; 455 { 456 methodHandle method(thread, array->element(0)->method()); 457 Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci()); 458 return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL; 459 } 460 461 // Compute information for handling adapters and adjusting the frame size of the caller. 462 int caller_adjustment = 0; 463 464 // Compute the amount the oldest interpreter frame will have to adjust 465 // its caller's stack by. If the caller is a compiled frame then 466 // we pretend that the callee has no parameters so that the 467 // extension counts for the full amount of locals and not just 468 // locals-parms. This is because without a c2i adapter the parm 469 // area as created by the compiled frame will not be usable by 470 // the interpreter. (Depending on the calling convention there 471 // may not even be enough space). 
472 473 // QQQ I'd rather see this pushed down into last_frame_adjust 474 // and have it take the sender (aka caller). 475 476 if (deopt_sender.is_compiled_frame() || caller_was_method_handle) { 477 caller_adjustment = last_frame_adjust(0, callee_locals); 478 } else if (callee_locals > callee_parameters) { 479 // The caller frame may need extending to accommodate 480 // non-parameter locals of the first unpacked interpreted frame. 481 // Compute that adjustment. 482 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); 483 } 484 485 // If the sender is deoptimized the we must retrieve the address of the handler 486 // since the frame will "magically" show the original pc before the deopt 487 // and we'd undo the deopt. 488 489 frame_pcs[0] = deopt_sender.raw_pc(); 490 491 #ifndef SHARK 492 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc"); 493 #endif // SHARK 494 495 #ifdef INCLUDE_JVMCI 496 if (exceptionObject() != NULL) { 497 thread->set_exception_oop(exceptionObject()); 498 exec_mode = Unpack_exception; 499 } 500 #endif 501 502 if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) { 503 assert(thread->has_pending_exception(), "should have thrown OOME"); 504 thread->set_exception_oop(thread->pending_exception()); 505 thread->clear_pending_exception(); 506 exec_mode = Unpack_exception; 507 } 508 509 #if INCLUDE_JVMCI 510 if (thread->frames_to_pop_failed_realloc() > 0) { 511 thread->set_pending_monitorenter(false); 512 } 513 #endif 514 515 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, 516 caller_adjustment * BytesPerWord, 517 caller_was_method_handle ? 0 : callee_parameters, 518 number_of_frames, 519 frame_sizes, 520 frame_pcs, 521 return_type, 522 exec_mode); 523 // On some platforms, we need a way to pass some platform dependent 524 // information to the unpacking code so the skeletal frames come out 525 // correct (initial fp value, unextended sp, ...) 
526 info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info()); 527 528 if (array->frames() > 1) { 529 if (VerifyStack && TraceDeoptimization) { 530 ttyLocker ttyl; 531 tty->print_cr("Deoptimizing method containing inlining"); 532 } 533 } 534 535 array->set_unroll_block(info); 536 return info; 537 } 538 539 // Called to cleanup deoptimization data structures in normal case 540 // after unpacking to stack and when stack overflow error occurs 541 void Deoptimization::cleanup_deopt_info(JavaThread *thread, 542 vframeArray *array) { 543 544 // Get array if coming from exception 545 if (array == NULL) { 546 array = thread->vframe_array_head(); 547 } 548 thread->set_vframe_array_head(NULL); 549 550 // Free the previous UnrollBlock 551 vframeArray* old_array = thread->vframe_array_last(); 552 thread->set_vframe_array_last(array); 553 554 if (old_array != NULL) { 555 UnrollBlock* old_info = old_array->unroll_block(); 556 old_array->set_unroll_block(NULL); 557 delete old_info; 558 delete old_array; 559 } 560 561 // Deallocate any resource creating in this routine and any ResourceObjs allocated 562 // inside the vframeArray (StackValueCollections) 563 564 delete thread->deopt_mark(); 565 thread->set_deopt_mark(NULL); 566 thread->set_deopt_compiled_method(NULL); 567 568 569 if (JvmtiExport::can_pop_frame()) { 570 #ifndef CC_INTERP 571 // Regardless of whether we entered this routine with the pending 572 // popframe condition bit set, we should always clear it now 573 thread->clear_popframe_condition(); 574 #else 575 // C++ interpreter will clear has_pending_popframe when it enters 576 // with method_resume. For deopt_resume2 we clear it now. 577 if (thread->popframe_forcing_deopt_reexecution()) 578 thread->clear_popframe_condition(); 579 #endif /* CC_INTERP */ 580 } 581 582 // unpack_frames() is called at the end of the deoptimization handler 583 // and (in C2) at the end of the uncommon trap handler. Note this fact 584 // so that an asynchronous stack walker can work again. This counter is 585 // incremented at the beginning of fetch_unroll_info() and (in C2) at 586 // the beginning of uncommon_trap(). 587 thread->dec_in_deopt_handler(); 588 } 589 590 // Moved from cpu directories because none of the cpus has callee save values. 591 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp. 592 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { 593 594 // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in 595 // the days we had adapter frames. When we deoptimize a situation where a 596 // compiled caller calls a compiled caller will have registers it expects 597 // to survive the call to the callee. If we deoptimize the callee the only 598 // way we can restore these registers is to have the oldest interpreter 599 // frame that we create restore these values. That is what this routine 600 // will accomplish. 601 602 // At the moment we have modified c2 to not have any callee save registers 603 // so this problem does not exist and this routine is just a place holder. 604 605 assert(f->is_interpreted_frame(), "must be interpreted"); 606 } 607 608 // Return BasicType of value being returned 609 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)) 610 611 // We are already active int he special DeoptResourceMark any ResourceObj's we 612 // allocate will be freed at the end of the routine. 613 614 // It is actually ok to allocate handles in a leaf method. 
It causes no safepoints, 615 // but makes the entry a little slower. There is however a little dance we have to 616 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro 617 ResetNoHandleMark rnhm; // No-op in release/product versions 618 HandleMark hm; 619 620 frame stub_frame = thread->last_frame(); 621 622 // Since the frame to unpack is the top frame of this thread, the vframe_array_head 623 // must point to the vframeArray for the unpack frame. 624 vframeArray* array = thread->vframe_array_head(); 625 626 #ifndef PRODUCT 627 if (TraceDeoptimization) { 628 ttyLocker ttyl; 629 tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", 630 p2i(thread), p2i(array), exec_mode); 631 } 632 #endif 633 Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d", 634 p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode); 635 636 UnrollBlock* info = array->unroll_block(); 637 638 // Unpack the interpreter frames and any adapter frame (c2 only) we might create. 639 array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters()); 640 641 BasicType bt = info->return_type(); 642 643 // If we have an exception pending, claim that the return type is an oop 644 // so the deopt_blob does not overwrite the exception_oop. 645 646 if (exec_mode == Unpack_exception) 647 bt = T_OBJECT; 648 649 // Cleanup thread deopt data 650 cleanup_deopt_info(thread, array); 651 652 #ifndef PRODUCT 653 if (VerifyStack) { 654 ResourceMark res_mark; 655 656 thread->validate_frame_layout(); 657 658 // Verify that the just-unpacked frames match the interpreter's 659 // notions of expression stack and locals 660 vframeArray* cur_array = thread->vframe_array_last(); 661 RegisterMap rm(thread, false); 662 rm.set_include_argument_oops(false); 663 bool is_top_frame = true; 664 int callee_size_of_parameters = 0; 665 int callee_max_locals = 0; 666 for (int i = 0; i < cur_array->frames(); i++) { 667 vframeArrayElement* el = cur_array->element(i); 668 frame* iframe = el->iframe(); 669 guarantee(iframe->is_interpreted_frame(), "Wrong frame type"); 670 671 // Get the oop map for this bci 672 InterpreterOopMap mask; 673 int cur_invoke_parameter_size = 0; 674 bool try_next_mask = false; 675 int next_mask_expression_stack_size = -1; 676 int top_frame_expression_stack_adjustment = 0; 677 methodHandle mh(thread, iframe->interpreter_frame_method()); 678 OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask); 679 BytecodeStream str(mh); 680 str.set_start(iframe->interpreter_frame_bci()); 681 int max_bci = mh->code_size(); 682 // Get to the next bytecode if possible 683 assert(str.bci() < max_bci, "bci in interpreter frame out of bounds"); 684 // Check to see if we can grab the number of outgoing arguments 685 // at an uncommon trap for an invoke (where the compiler 686 // generates debug info before the invoke has executed) 687 Bytecodes::Code cur_code = str.next(); 688 if (cur_code == Bytecodes::_invokevirtual || 689 cur_code == Bytecodes::_invokespecial || 690 cur_code == Bytecodes::_invokestatic || 691 cur_code == Bytecodes::_invokeinterface || 692 cur_code == Bytecodes::_invokedynamic) { 693 Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci()); 694 Symbol* signature = invoke.signature(); 695 ArgumentSizeComputer asc(signature); 696 cur_invoke_parameter_size = asc.size(); 697 if (invoke.has_receiver()) { 698 // Add in receiver 699 ++cur_invoke_parameter_size; 700 } 701 if (i != 0 && !invoke.is_invokedynamic() 
&& MethodHandles::has_member_arg(invoke.klass(), invoke.name())) { 702 callee_size_of_parameters++; 703 } 704 } 705 if (str.bci() < max_bci) { 706 Bytecodes::Code bc = str.next(); 707 if (bc >= 0) { 708 // The interpreter oop map generator reports results before 709 // the current bytecode has executed except in the case of 710 // calls. It seems to be hard to tell whether the compiler 711 // has emitted debug information matching the "state before" 712 // a given bytecode or the state after, so we try both 713 switch (cur_code) { 714 case Bytecodes::_invokevirtual: 715 case Bytecodes::_invokespecial: 716 case Bytecodes::_invokestatic: 717 case Bytecodes::_invokeinterface: 718 case Bytecodes::_invokedynamic: 719 case Bytecodes::_athrow: 720 break; 721 default: { 722 InterpreterOopMap next_mask; 723 OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask); 724 next_mask_expression_stack_size = next_mask.expression_stack_size(); 725 // Need to subtract off the size of the result type of 726 // the bytecode because this is not described in the 727 // debug info but returned to the interpreter in the TOS 728 // caching register 729 BasicType bytecode_result_type = Bytecodes::result_type(cur_code); 730 if (bytecode_result_type != T_ILLEGAL) { 731 top_frame_expression_stack_adjustment = type2size[bytecode_result_type]; 732 } 733 assert(top_frame_expression_stack_adjustment >= 0, ""); 734 try_next_mask = true; 735 break; 736 } 737 } 738 } 739 } 740 741 // Verify stack depth and oops in frame 742 // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc) 743 if (!( 744 /* SPARC */ 745 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) || 746 /* x86 */ 747 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) || 748 (try_next_mask && 749 (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size - 750 top_frame_expression_stack_adjustment))) || 751 (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) || 752 (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) && 753 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size)) 754 )) { 755 ttyLocker ttyl; 756 757 // Print out some information that will help us debug the problem 758 tty->print_cr("Wrong number of expression stack elements during deoptimization"); 759 tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1); 760 tty->print_cr(" Fabricated interpreter frame had %d expression stack elements", 761 iframe->interpreter_frame_expression_stack_size()); 762 tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size()); 763 tty->print_cr(" try_next_mask = %d", try_next_mask); 764 tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size); 765 tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters); 766 tty->print_cr(" callee_max_locals = %d", callee_max_locals); 767 tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment); 768 tty->print_cr(" exec_mode = %d", exec_mode); 769 tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size); 770 tty->print_cr(" Thread = " INTPTR_FORMAT ", 
thread ID = %d", p2i(thread), thread->osthread()->thread_id()); 771 tty->print_cr(" Interpreted frames:"); 772 for (int k = 0; k < cur_array->frames(); k++) { 773 vframeArrayElement* el = cur_array->element(k); 774 tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci()); 775 } 776 cur_array->print_on_2(tty); 777 guarantee(false, "wrong number of expression stack elements during deopt"); 778 } 779 VerifyOopClosure verify; 780 iframe->oops_interpreted_do(&verify, &rm, false); 781 callee_size_of_parameters = mh->size_of_parameters(); 782 callee_max_locals = mh->max_locals(); 783 is_top_frame = false; 784 } 785 } 786 #endif /* !PRODUCT */ 787 788 789 return bt; 790 JRT_END 791 792 793 int Deoptimization::deoptimize_dependents() { 794 Threads::deoptimized_wrt_marked_nmethods(); 795 return 0; 796 } 797 798 Deoptimization::DeoptAction Deoptimization::_unloaded_action 799 = Deoptimization::Action_reinterpret; 800 801 #if defined(COMPILER2) || INCLUDE_JVMCI 802 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) { 803 Handle pending_exception(THREAD, thread->pending_exception()); 804 const char* exception_file = thread->exception_file(); 805 int exception_line = thread->exception_line(); 806 thread->clear_pending_exception(); 807 808 bool failures = false; 809 810 for (int i = 0; i < objects->length(); i++) { 811 assert(objects->at(i)->is_object(), "invalid debug information"); 812 ObjectValue* sv = (ObjectValue*) objects->at(i); 813 814 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); 815 oop obj = NULL; 816 817 if (k->is_instance_klass()) { 818 InstanceKlass* ik = InstanceKlass::cast(k); 819 obj = ik->allocate_instance(THREAD); 820 } else if (k->is_typeArray_klass()) { 821 TypeArrayKlass* ak = TypeArrayKlass::cast(k); 822 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); 823 int len = sv->field_size() / type2size[ak->element_type()]; 824 obj = ak->allocate(len, THREAD); 825 } else if (k->is_objArray_klass()) { 826 ObjArrayKlass* ak = ObjArrayKlass::cast(k); 827 obj = ak->allocate(sv->field_size(), THREAD); 828 } 829 830 if (obj == NULL) { 831 failures = true; 832 } 833 834 assert(sv->value().is_null(), "redundant reallocation"); 835 assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); 836 CLEAR_PENDING_EXCEPTION; 837 sv->set_value(obj); 838 } 839 840 if (failures) { 841 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures); 842 } else if (pending_exception.not_null()) { 843 thread->set_pending_exception(pending_exception(), exception_file, exception_line); 844 } 845 846 return failures; 847 } 848 849 // restore elements of an eliminated type array 850 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { 851 int index = 0; 852 intptr_t val; 853 854 for (int i = 0; i < sv->field_size(); i++) { 855 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 856 switch(type) { 857 case T_LONG: case T_DOUBLE: { 858 assert(value->type() == T_INT, "Agreement."); 859 StackValue* low = 860 StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 861 #ifdef _LP64 862 jlong res = (jlong)low->get_int(); 863 #else 864 #ifdef SPARC 865 // For SPARC we have to swap high and low words. 
866 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 867 #else 868 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 869 #endif //SPARC 870 #endif 871 obj->long_at_put(index, res); 872 break; 873 } 874 875 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. 876 case T_INT: case T_FLOAT: { // 4 bytes. 877 assert(value->type() == T_INT, "Agreement."); 878 bool big_value = false; 879 if (i + 1 < sv->field_size() && type == T_INT) { 880 if (sv->field_at(i)->is_location()) { 881 Location::Type type = ((LocationValue*) sv->field_at(i))->location().type(); 882 if (type == Location::dbl || type == Location::lng) { 883 big_value = true; 884 } 885 } else if (sv->field_at(i)->is_constant_int()) { 886 ScopeValue* next_scope_field = sv->field_at(i + 1); 887 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 888 big_value = true; 889 } 890 } 891 } 892 893 if (big_value) { 894 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 895 #ifdef _LP64 896 jlong res = (jlong)low->get_int(); 897 #else 898 #ifdef SPARC 899 // For SPARC we have to swap high and low words. 900 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 901 #else 902 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 903 #endif //SPARC 904 #endif 905 obj->int_at_put(index, (jint)*((jint*)&res)); 906 obj->int_at_put(++index, (jint)*(((jint*)&res) + 1)); 907 } else { 908 val = value->get_int(); 909 obj->int_at_put(index, (jint)*((jint*)&val)); 910 } 911 break; 912 } 913 914 case T_SHORT: 915 assert(value->type() == T_INT, "Agreement."); 916 val = value->get_int(); 917 obj->short_at_put(index, (jshort)*((jint*)&val)); 918 break; 919 920 case T_CHAR: 921 assert(value->type() == T_INT, "Agreement."); 922 val = value->get_int(); 923 obj->char_at_put(index, (jchar)*((jint*)&val)); 924 break; 925 926 case T_BYTE: 927 assert(value->type() == T_INT, "Agreement."); 928 val = value->get_int(); 929 obj->byte_at_put(index, (jbyte)*((jint*)&val)); 930 break; 931 932 case T_BOOLEAN: 933 assert(value->type() == T_INT, "Agreement."); 934 val = value->get_int(); 935 obj->bool_at_put(index, (jboolean)*((jint*)&val)); 936 break; 937 938 default: 939 ShouldNotReachHere(); 940 } 941 index++; 942 } 943 } 944 945 946 // restore fields of an eliminated object array 947 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) { 948 for (int i = 0; i < sv->field_size(); i++) { 949 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 950 assert(value->type() == T_OBJECT, "object element expected"); 951 obj->obj_at_put(i, value->get_obj()()); 952 } 953 } 954 955 class ReassignedField { 956 public: 957 int _offset; 958 BasicType _type; 959 public: 960 ReassignedField() { 961 _offset = 0; 962 _type = T_ILLEGAL; 963 } 964 }; 965 966 int compare(ReassignedField* left, ReassignedField* right) { 967 return left->_offset - right->_offset; 968 } 969 970 // Restore fields of an eliminated instance object using the same field order 971 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true) 972 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) { 973 if (klass->superklass() != NULL) { 974 svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal); 975 } 976 977 
GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>(); 978 for (AllFieldStream fs(klass); !fs.done(); fs.next()) { 979 if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) { 980 ReassignedField field; 981 field._offset = fs.offset(); 982 field._type = FieldType::basic_type(fs.signature()); 983 fields->append(field); 984 } 985 } 986 fields->sort(compare); 987 for (int i = 0; i < fields->length(); i++) { 988 intptr_t val; 989 ScopeValue* scope_field = sv->field_at(svIndex); 990 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field); 991 int offset = fields->at(i)._offset; 992 BasicType type = fields->at(i)._type; 993 switch (type) { 994 case T_OBJECT: case T_ARRAY: 995 assert(value->type() == T_OBJECT, "Agreement."); 996 obj->obj_field_put(offset, value->get_obj()()); 997 break; 998 999 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. 1000 case T_INT: case T_FLOAT: { // 4 bytes. 1001 assert(value->type() == T_INT, "Agreement."); 1002 bool big_value = false; 1003 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) { 1004 if (scope_field->is_location()) { 1005 Location::Type type = ((LocationValue*) scope_field)->location().type(); 1006 if (type == Location::dbl || type == Location::lng) { 1007 big_value = true; 1008 } 1009 } 1010 if (scope_field->is_constant_int()) { 1011 ScopeValue* next_scope_field = sv->field_at(svIndex + 1); 1012 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 1013 big_value = true; 1014 } 1015 } 1016 } 1017 1018 if (big_value) { 1019 i++; 1020 assert(i < fields->length(), "second T_INT field needed"); 1021 assert(fields->at(i)._type == T_INT, "T_INT field needed"); 1022 } else { 1023 val = value->get_int(); 1024 obj->int_field_put(offset, (jint)*((jint*)&val)); 1025 break; 1026 } 1027 } 1028 /* no break */ 1029 1030 case T_LONG: case T_DOUBLE: { 1031 assert(value->type() == T_INT, "Agreement."); 1032 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex)); 1033 #ifdef _LP64 1034 jlong res = (jlong)low->get_int(); 1035 #else 1036 #ifdef SPARC 1037 // For SPARC we have to swap high and low words. 
1038 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 1039 #else 1040 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 1041 #endif //SPARC 1042 #endif 1043 obj->long_field_put(offset, res); 1044 break; 1045 } 1046 1047 case T_SHORT: 1048 assert(value->type() == T_INT, "Agreement."); 1049 val = value->get_int(); 1050 obj->short_field_put(offset, (jshort)*((jint*)&val)); 1051 break; 1052 1053 case T_CHAR: 1054 assert(value->type() == T_INT, "Agreement."); 1055 val = value->get_int(); 1056 obj->char_field_put(offset, (jchar)*((jint*)&val)); 1057 break; 1058 1059 case T_BYTE: 1060 assert(value->type() == T_INT, "Agreement."); 1061 val = value->get_int(); 1062 obj->byte_field_put(offset, (jbyte)*((jint*)&val)); 1063 break; 1064 1065 case T_BOOLEAN: 1066 assert(value->type() == T_INT, "Agreement."); 1067 val = value->get_int(); 1068 obj->bool_field_put(offset, (jboolean)*((jint*)&val)); 1069 break; 1070 1071 default: 1072 ShouldNotReachHere(); 1073 } 1074 svIndex++; 1075 } 1076 return svIndex; 1077 } 1078 1079 // restore fields of all eliminated objects and arrays 1080 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) { 1081 for (int i = 0; i < objects->length(); i++) { 1082 ObjectValue* sv = (ObjectValue*) objects->at(i); 1083 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); 1084 Handle obj = sv->value(); 1085 assert(obj.not_null() || realloc_failures, "reallocation was missed"); 1086 if (PrintDeoptimizationDetails) { 1087 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string()); 1088 } 1089 if (obj.is_null()) { 1090 continue; 1091 } 1092 1093 if (k->is_instance_klass()) { 1094 InstanceKlass* ik = InstanceKlass::cast(k); 1095 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal); 1096 } else if (k->is_typeArray_klass()) { 1097 TypeArrayKlass* ak = TypeArrayKlass::cast(k); 1098 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type()); 1099 } else if (k->is_objArray_klass()) { 1100 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj()); 1101 } 1102 } 1103 } 1104 1105 1106 // relock objects for which synchronization was eliminated 1107 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) { 1108 for (int i = 0; i < monitors->length(); i++) { 1109 MonitorInfo* mon_info = monitors->at(i); 1110 if (mon_info->eliminated()) { 1111 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed"); 1112 if (!mon_info->owner_is_scalar_replaced()) { 1113 Handle obj(thread, mon_info->owner()); 1114 markOop mark = obj->mark(); 1115 if (UseBiasedLocking && mark->has_bias_pattern()) { 1116 // New allocated objects may have the mark set to anonymously biased. 1117 // Also the deoptimized method may called methods with synchronization 1118 // where the thread-local object is bias locked to the current thread. 1119 assert(mark->is_biased_anonymously() || 1120 mark->biased_locker() == thread, "should be locked to current thread"); 1121 // Reset mark word to unbiased prototype. 
1122 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 1123 obj->set_mark(unbiased_prototype); 1124 } 1125 BasicLock* lock = mon_info->lock(); 1126 ObjectSynchronizer::slow_enter(obj, lock, thread); 1127 assert(mon_info->owner()->is_locked(), "object must be locked now"); 1128 } 1129 } 1130 } 1131 } 1132 1133 1134 #ifndef PRODUCT 1135 // print information about reallocated objects 1136 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) { 1137 fieldDescriptor fd; 1138 1139 for (int i = 0; i < objects->length(); i++) { 1140 ObjectValue* sv = (ObjectValue*) objects->at(i); 1141 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); 1142 Handle obj = sv->value(); 1143 1144 tty->print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()())); 1145 k->print_value(); 1146 assert(obj.not_null() || realloc_failures, "reallocation was missed"); 1147 if (obj.is_null()) { 1148 tty->print(" allocation failed"); 1149 } else { 1150 tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); 1151 } 1152 tty->cr(); 1153 1154 if (Verbose && !obj.is_null()) { 1155 k->oop_print_on(obj(), tty); 1156 } 1157 } 1158 } 1159 #endif 1160 #endif // COMPILER2 || INCLUDE_JVMCI 1161 1162 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) { 1163 Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp())); 1164 1165 #ifndef PRODUCT 1166 if (PrintDeoptimizationDetails) { 1167 ttyLocker ttyl; 1168 tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread)); 1169 fr.print_on(tty); 1170 tty->print_cr(" Virtual frames (innermost first):"); 1171 for (int index = 0; index < chunk->length(); index++) { 1172 compiledVFrame* vf = chunk->at(index); 1173 tty->print(" %2d - ", index); 1174 vf->print_value(); 1175 int bci = chunk->at(index)->raw_bci(); 1176 const char* code_name; 1177 if (bci == SynchronizationEntryBCI) { 1178 code_name = "sync entry"; 1179 } else { 1180 Bytecodes::Code code = vf->method()->code_at(bci); 1181 code_name = Bytecodes::name(code); 1182 } 1183 tty->print(" - %s", code_name); 1184 tty->print_cr(" @ bci %d ", bci); 1185 if (Verbose) { 1186 vf->print(); 1187 tty->cr(); 1188 } 1189 } 1190 } 1191 #endif 1192 1193 // Register map for next frame (used for stack crawl). We capture 1194 // the state of the deopt'ing frame's caller. Thus if we need to 1195 // stuff a C2I adapter we can properly fill in the callee-save 1196 // register locations. 1197 frame caller = fr.sender(reg_map); 1198 int frame_size = caller.sp() - fr.sp(); 1199 1200 frame sender = caller; 1201 1202 // Since the Java thread being deoptimized will eventually adjust it's own stack, 1203 // the vframeArray containing the unpacking information is allocated in the C heap. 1204 // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). 
1205 vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures); 1206 1207 // Compare the vframeArray to the collected vframes 1208 assert(array->structural_compare(thread, chunk), "just checking"); 1209 1210 #ifndef PRODUCT 1211 if (PrintDeoptimizationDetails) { 1212 ttyLocker ttyl; 1213 tty->print_cr(" Created vframeArray " INTPTR_FORMAT, p2i(array)); 1214 } 1215 #endif // PRODUCT 1216 1217 return array; 1218 } 1219 1220 #if defined(COMPILER2) || INCLUDE_JVMCI 1221 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) { 1222 // Reallocation of some scalar replaced objects failed. Record 1223 // that we need to pop all the interpreter frames for the 1224 // deoptimized compiled frame. 1225 assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?"); 1226 thread->set_frames_to_pop_failed_realloc(array->frames()); 1227 // Unlock all monitors here otherwise the interpreter will see a 1228 // mix of locked and unlocked monitors (because of failed 1229 // reallocations of synchronized objects) and be confused. 1230 for (int i = 0; i < array->frames(); i++) { 1231 MonitorChunk* monitors = array->element(i)->monitors(); 1232 if (monitors != NULL) { 1233 for (int j = 0; j < monitors->number_of_monitors(); j++) { 1234 BasicObjectLock* src = monitors->at(j); 1235 if (src->obj() != NULL) { 1236 ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread); 1237 } 1238 } 1239 array->element(i)->free_monitors(thread); 1240 #ifdef ASSERT 1241 array->element(i)->set_removed_monitors(); 1242 #endif 1243 } 1244 } 1245 } 1246 #endif 1247 1248 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) { 1249 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); 1250 Thread* thread = Thread::current(); 1251 for (int i = 0; i < monitors->length(); i++) { 1252 MonitorInfo* mon_info = monitors->at(i); 1253 if (!mon_info->eliminated() && mon_info->owner() != NULL) { 1254 objects_to_revoke->append(Handle(thread, mon_info->owner())); 1255 } 1256 } 1257 } 1258 1259 1260 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) { 1261 if (!UseBiasedLocking) { 1262 return; 1263 } 1264 1265 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>(); 1266 1267 // Unfortunately we don't have a RegisterMap available in most of 1268 // the places we want to call this routine so we need to walk the 1269 // stack again to update the register map. 
1270 if (map == NULL || !map->update_map()) { 1271 StackFrameStream sfs(thread, true); 1272 bool found = false; 1273 while (!found && !sfs.is_done()) { 1274 frame* cur = sfs.current(); 1275 sfs.next(); 1276 found = cur->id() == fr.id(); 1277 } 1278 assert(found, "frame to be deoptimized not found on target thread's stack"); 1279 map = sfs.register_map(); 1280 } 1281 1282 vframe* vf = vframe::new_vframe(&fr, map, thread); 1283 compiledVFrame* cvf = compiledVFrame::cast(vf); 1284 // Revoke monitors' biases in all scopes 1285 while (!cvf->is_top()) { 1286 collect_monitors(cvf, objects_to_revoke); 1287 cvf = compiledVFrame::cast(cvf->sender()); 1288 } 1289 collect_monitors(cvf, objects_to_revoke); 1290 1291 if (SafepointSynchronize::is_at_safepoint()) { 1292 BiasedLocking::revoke_at_safepoint(objects_to_revoke); 1293 } else { 1294 BiasedLocking::revoke(objects_to_revoke); 1295 } 1296 } 1297 1298 1299 void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) { 1300 if (!UseBiasedLocking) { 1301 return; 1302 } 1303 1304 assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint"); 1305 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>(); 1306 for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) { 1307 if (jt->has_last_Java_frame()) { 1308 StackFrameStream sfs(jt, true); 1309 while (!sfs.is_done()) { 1310 frame* cur = sfs.current(); 1311 if (cb->contains(cur->pc())) { 1312 vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt); 1313 compiledVFrame* cvf = compiledVFrame::cast(vf); 1314 // Revoke monitors' biases in all scopes 1315 while (!cvf->is_top()) { 1316 collect_monitors(cvf, objects_to_revoke); 1317 cvf = compiledVFrame::cast(cvf->sender()); 1318 } 1319 collect_monitors(cvf, objects_to_revoke); 1320 } 1321 sfs.next(); 1322 } 1323 } 1324 } 1325 BiasedLocking::revoke_at_safepoint(objects_to_revoke); 1326 } 1327 1328 1329 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) { 1330 assert(fr.can_be_deoptimized(), "checking frame type"); 1331 1332 gather_statistics(reason, Action_none, Bytecodes::_illegal); 1333 1334 if (LogCompilation && xtty != NULL) { 1335 CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); 1336 assert(cm != NULL, "only compiled methods can deopt"); 1337 1338 ttyLocker ttyl; 1339 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc())); 1340 cm->log_identity(xtty); 1341 xtty->end_head(); 1342 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { 1343 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1344 xtty->method(sd->method()); 1345 xtty->end_elem(); 1346 if (sd->is_top()) break; 1347 } 1348 xtty->tail("deoptimized"); 1349 } 1350 1351 // Patch the compiled method so that when execution returns to it we will 1352 // deopt the execution state and return to the interpreter. 1353 fr.deoptimize(thread); 1354 } 1355 1356 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) { 1357 deoptimize(thread, fr, map, Reason_constraint); 1358 } 1359 1360 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) { 1361 // Deoptimize only if the frame comes from compile code. 1362 // Do not deoptimize the frame which is already patched 1363 // during the execution of the loops below. 
1364 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) { 1365 return; 1366 } 1367 ResourceMark rm; 1368 DeoptimizationMarker dm; 1369 if (UseBiasedLocking) { 1370 revoke_biases_of_monitors(thread, fr, map); 1371 } 1372 deoptimize_single_frame(thread, fr, reason); 1373 1374 } 1375 1376 1377 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1378 assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(), 1379 "can only deoptimize other thread at a safepoint"); 1380 // Compute frame and register map based on thread and sp. 1381 RegisterMap reg_map(thread, UseBiasedLocking); 1382 frame fr = thread->last_frame(); 1383 while (fr.id() != id) { 1384 fr = fr.sender(®_map); 1385 } 1386 deoptimize(thread, fr, ®_map, reason); 1387 } 1388 1389 1390 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1391 if (thread == Thread::current()) { 1392 Deoptimization::deoptimize_frame_internal(thread, id, reason); 1393 } else { 1394 VM_DeoptimizeFrame deopt(thread, id, reason); 1395 VMThread::execute(&deopt); 1396 } 1397 } 1398 1399 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) { 1400 deoptimize_frame(thread, id, Reason_constraint); 1401 } 1402 1403 // JVMTI PopFrame support 1404 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address)) 1405 { 1406 thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address); 1407 } 1408 JRT_END 1409 1410 MethodData* 1411 Deoptimization::get_method_data(JavaThread* thread, methodHandle m, 1412 bool create_if_missing) { 1413 Thread* THREAD = thread; 1414 MethodData* mdo = m()->method_data(); 1415 if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) { 1416 // Build an MDO. Ignore errors like OutOfMemory; 1417 // that simply means we won't have an MDO to update. 1418 Method::build_interpreter_method_data(m, THREAD); 1419 if (HAS_PENDING_EXCEPTION) { 1420 assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); 1421 CLEAR_PENDING_EXCEPTION; 1422 } 1423 mdo = m()->method_data(); 1424 } 1425 return mdo; 1426 } 1427 1428 #if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI 1429 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) { 1430 // in case of an unresolved klass entry, load the class. 1431 if (constant_pool->tag_at(index).is_unresolved_klass()) { 1432 Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK); 1433 return; 1434 } 1435 1436 if (!constant_pool->tag_at(index).is_symbol()) return; 1437 1438 Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader()); 1439 Symbol* symbol = constant_pool->symbol_at(index); 1440 1441 // class name? 1442 if (symbol->byte_at(0) != '(') { 1443 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain()); 1444 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK); 1445 return; 1446 } 1447 1448 // then it must be a signature! 
1449 ResourceMark rm(THREAD);
1450 for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1451 if (ss.is_object()) {
1452 Symbol* class_name = ss.as_symbol(CHECK);
1453 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1454 SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1455 }
1456 }
1457 }
1458
1459
1460 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
1461 EXCEPTION_MARK;
1462 load_class_by_index(constant_pool, index, THREAD);
1463 if (HAS_PENDING_EXCEPTION) {
1464 // Exception happened during classloading. We ignore the exception here, since
1465 // it will be raised again once the current activation is deoptimized and the
1466 // interpreter re-executes the bytecode.
1467 CLEAR_PENDING_EXCEPTION;
1468 // Class loading called Java code which may have caused a stack
1469 // overflow. If the exception was thrown right before the return
1470 // to the runtime, the stack is no longer guarded. Reguard the
1471 // stack; otherwise, if we return to the uncommon trap blob and the
1472 // stack bang causes a stack overflow, we crash.
1473 assert(THREAD->is_Java_thread(), "only a java thread can be here");
1474 JavaThread* thread = (JavaThread*)THREAD;
1475 bool guard_pages_enabled = thread->stack_guards_enabled();
1476 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1477 assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1478 }
1479 }
1480
1481 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1482 HandleMark hm;
1483
1484 // uncommon_trap() is called at the beginning of the uncommon trap
1485 // handler. Note this fact before we start generating temporary frames
1486 // that can confuse an asynchronous stack walker. This counter is
1487 // decremented at the end of unpack_frames().
1488 thread->inc_in_deopt_handler();
1489
1490 // We need to update the map if we have biased locking.
1491 #if INCLUDE_JVMCI
1492 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1493 RegisterMap reg_map(thread, true);
1494 #else
1495 RegisterMap reg_map(thread, UseBiasedLocking);
1496 #endif
1497 frame stub_frame = thread->last_frame();
1498 frame fr = stub_frame.sender(&reg_map);
1499 // Make sure the calling nmethod is not getting deoptimized and removed
1500 // before we are done with it.
1501 nmethodLocker nl(fr.pc());
1502
1503 // Log a message
1504 Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1505 trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1506
1507 {
1508 ResourceMark rm;
1509
1510 // Revoke biases of any monitors in the frame to ensure we can migrate them
1511 revoke_biases_of_monitors(thread, fr, &reg_map);
1512
1513 DeoptReason reason = trap_request_reason(trap_request);
1514 DeoptAction action = trap_request_action(trap_request);
1515 #if INCLUDE_JVMCI
1516 int debug_id = trap_request_debug_id(trap_request);
1517 #endif
1518 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1519
1520 vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
1521 compiledVFrame* cvf = compiledVFrame::cast(vf);
1522
1523 CompiledMethod* nm = cvf->code();
1524
1525 ScopeDesc* trap_scope = cvf->scope();
1526
1527 if (TraceDeoptimization) {
1528 ttyLocker ttyl;
1529 tty->print_cr(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1530 #if INCLUDE_JVMCI
1531 , debug_id
1532 #endif
1533 );
1534 }
1535
1536 methodHandle trap_method = trap_scope->method();
1537 int trap_bci = trap_scope->bci();
1538 #if INCLUDE_JVMCI
1539 oop speculation = thread->pending_failed_speculation();
1540 if (nm->is_compiled_by_jvmci()) {
1541 if (speculation != NULL) {
1542 oop speculation_log = nm->as_nmethod()->speculation_log();
1543 if (speculation_log != NULL) {
1544 if (TraceDeoptimization || TraceUncollectedSpeculations) {
1545 if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
1546 tty->print_cr("A speculation that was not collected by the compiler is being overwritten");
1547 }
1548 }
1549 if (TraceDeoptimization) {
1550 tty->print_cr("Saving speculation to speculation log");
1551 }
1552 HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation);
1553 } else {
1554 if (TraceDeoptimization) {
1555 tty->print_cr("Speculation present but no speculation log");
1556 }
1557 }
1558 thread->set_pending_failed_speculation(NULL);
1559 } else {
1560 if (TraceDeoptimization) {
1561 tty->print_cr("No speculation");
1562 }
1563 }
1564 } else {
1565 assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1566 }
1567
1568 if (trap_bci == SynchronizationEntryBCI) {
1569 trap_bci = 0;
1570 thread->set_pending_monitorenter(true);
1571 }
1572
1573 if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1574 thread->set_pending_transfer_to_interpreter(true);
1575 }
1576 #endif
1577
1578 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
1579 // Record this event in the histogram.
1580 gather_statistics(reason, action, trap_bc);
1581
1582 // Ensure that we can record deopt. history:
1583 // Need MDO to record RTM code generation state.
1584 bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking ); 1585 1586 methodHandle profiled_method; 1587 #if INCLUDE_JVMCI 1588 if (nm->is_compiled_by_jvmci()) { 1589 profiled_method = nm->method(); 1590 } else { 1591 profiled_method = trap_method; 1592 } 1593 #else 1594 profiled_method = trap_method; 1595 #endif 1596 1597 MethodData* trap_mdo = 1598 get_method_data(thread, profiled_method, create_if_missing); 1599 1600 // Log a message 1601 Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s", 1602 trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()), 1603 trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name()); 1604 1605 // Print a bunch of diagnostics, if requested. 1606 if (TraceDeoptimization || LogCompilation) { 1607 ResourceMark rm; 1608 ttyLocker ttyl; 1609 char buf[100]; 1610 if (xtty != NULL) { 1611 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", 1612 os::current_thread_id(), 1613 format_trap_request(buf, sizeof(buf), trap_request)); 1614 nm->log_identity(xtty); 1615 } 1616 Symbol* class_name = NULL; 1617 bool unresolved = false; 1618 if (unloaded_class_index >= 0) { 1619 constantPoolHandle constants (THREAD, trap_method->constants()); 1620 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { 1621 class_name = constants->klass_name_at(unloaded_class_index); 1622 unresolved = true; 1623 if (xtty != NULL) 1624 xtty->print(" unresolved='1'"); 1625 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { 1626 class_name = constants->symbol_at(unloaded_class_index); 1627 } 1628 if (xtty != NULL) 1629 xtty->name(class_name); 1630 } 1631 if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) { 1632 // Dump the relevant MDO state. 1633 // This is the deopt count for the current reason, any previous 1634 // reasons or recompiles seen at this point. 1635 int dcnt = trap_mdo->trap_count(reason); 1636 if (dcnt != 0) 1637 xtty->print(" count='%d'", dcnt); 1638 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); 1639 int dos = (pdata == NULL)? 
0: pdata->trap_state(); 1640 if (dos != 0) { 1641 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); 1642 if (trap_state_is_recompiled(dos)) { 1643 int recnt2 = trap_mdo->overflow_recompile_count(); 1644 if (recnt2 != 0) 1645 xtty->print(" recompiles2='%d'", recnt2); 1646 } 1647 } 1648 } 1649 if (xtty != NULL) { 1650 xtty->stamp(); 1651 xtty->end_head(); 1652 } 1653 if (TraceDeoptimization) { // make noise on the tty 1654 tty->print("Uncommon trap occurred in"); 1655 nm->method()->print_short_name(tty); 1656 tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id()); 1657 #if INCLUDE_JVMCI 1658 if (nm->is_nmethod()) { 1659 oop installedCode = nm->as_nmethod()->jvmci_installed_code(); 1660 if (installedCode != NULL) { 1661 oop installedCodeName = NULL; 1662 if (installedCode->is_a(InstalledCode::klass())) { 1663 installedCodeName = InstalledCode::name(installedCode); 1664 } 1665 if (installedCodeName != NULL) { 1666 tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName)); 1667 } else { 1668 tty->print(" (JVMCI: installed code has no name) "); 1669 } 1670 } else if (nm->is_compiled_by_jvmci()) { 1671 tty->print(" (JVMCI: no installed code) "); 1672 } 1673 } 1674 #endif 1675 tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), 1676 p2i(fr.pc()), 1677 os::current_thread_id(), 1678 trap_reason_name(reason), 1679 trap_action_name(action), 1680 unloaded_class_index 1681 #if INCLUDE_JVMCI 1682 , debug_id 1683 #endif 1684 ); 1685 if (class_name != NULL) { 1686 tty->print(unresolved ? " unresolved class: " : " symbol: "); 1687 class_name->print_symbol_on(tty); 1688 } 1689 tty->cr(); 1690 } 1691 if (xtty != NULL) { 1692 // Log the precise location of the trap. 1693 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 1694 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1695 xtty->method(sd->method()); 1696 xtty->end_elem(); 1697 if (sd->is_top()) break; 1698 } 1699 xtty->tail("uncommon_trap"); 1700 } 1701 } 1702 // (End diagnostic printout.) 1703 1704 // Load class if necessary 1705 if (unloaded_class_index >= 0) { 1706 constantPoolHandle constants(THREAD, trap_method->constants()); 1707 load_class_by_index(constants, unloaded_class_index); 1708 } 1709 1710 // Flush the nmethod if necessary and desirable. 1711 // 1712 // We need to avoid situations where we are re-flushing the nmethod 1713 // because of a hot deoptimization site. Repeated flushes at the same 1714 // point need to be detected by the compiler and avoided. If the compiler 1715 // cannot avoid them (or has a bug and "refuses" to avoid them), this 1716 // module must take measures to avoid an infinite cycle of recompilation 1717 // and deoptimization. There are several such measures: 1718 // 1719 // 1. If a recompilation is ordered a second time at some site X 1720 // and for the same reason R, the action is adjusted to 'reinterpret', 1721 // to give the interpreter time to exercise the method more thoroughly. 1722 // If this happens, the method's overflow_recompile_count is incremented. 1723 // 1724 // 2. If the compiler fails to reduce the deoptimization rate, then 1725 // the method's overflow_recompile_count will begin to exceed the set 1726 // limit PerBytecodeRecompilationCutoff. If this happens, the action 1727 // is adjusted to 'make_not_compilable', and the method is abandoned 1728 // to the interpreter. 
This is a performance hit for hot methods,
1729 // but is better than a disastrous infinite cycle of recompilations.
1730 // (Actually, only the method containing the site X is abandoned.)
1731 //
1732 // 3. In parallel with the previous measures, if the total number of
1733 // recompilations of a method exceeds the much larger set limit
1734 // PerMethodRecompilationCutoff, the method is abandoned.
1735 // This should only happen if the method is very large and has
1736 // many "lukewarm" deoptimizations. The code which enforces this
1737 // limit is elsewhere (class nmethod, class Method).
1738 //
1739 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1740 // to recompile at each bytecode independently of the per-BCI cutoff.
1741 //
1742 // The decision to update code is up to the compiler, and is encoded
1743 // in the Action_xxx code. If the compiler requests Action_none
1744 // no trap state is changed, no compiled code is changed, and the
1745 // computation suffers along in the interpreter.
1746 //
1747 // The other action codes specify various tactics for decompilation
1748 // and recompilation. Action_maybe_recompile is the loosest, and
1749 // allows the compiled code to stay around until enough traps are seen,
1750 // and until the compiler gets around to recompiling the trapping method.
1751 //
1752 // The other actions cause immediate removal of the present code.
1753
1754 // Traps caused by an injected profile shouldn't pollute trap counts.
1755 bool injected_profile_trap = trap_method->has_injected_profile() &&
1756 (reason == Reason_intrinsic || reason == Reason_unreached);
1757
1758 bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1759 bool make_not_entrant = false;
1760 bool make_not_compilable = false;
1761 bool reprofile = false;
1762 switch (action) {
1763 case Action_none:
1764 // Keep the old code.
1765 update_trap_state = false;
1766 break;
1767 case Action_maybe_recompile:
1768 // No need to invalidate the present code, but we can
1769 // initiate another compilation.
1770 // Start the compiler without (necessarily) invalidating the nmethod.
1771 // The system will tolerate the old code, but new code should be
1772 // generated when possible.
1773 break;
1774 case Action_reinterpret:
1775 // Go back into the interpreter for a while, and then consider
1776 // recompiling from scratch.
1777 make_not_entrant = true;
1778 // Reset the invocation counter for the outermost method.
1779 // This will allow the interpreter to exercise the bytecodes
1780 // for a while before recompiling.
1781 // By contrast, Action_make_not_entrant is immediate.
1782 //
1783 // Note that the compiler will track null_check, null_assert,
1784 // range_check, and class_check events and log them as if they
1785 // had been traps taken from compiled code. This will update
1786 // the MDO trap history so that the next compilation will
1787 // properly detect hot trap sites.
1788 reprofile = true;
1789 break;
1790 case Action_make_not_entrant:
1791 // Request immediate recompilation, and get rid of the old code.
1792 // Make them not entrant, so next time they are called they get
1793 // recompiled. Unloaded classes are loaded now so recompile before next
1794 // time they are called. Same for uninitialized. The interpreter will
1795 // link the missing class, if any.
1796 make_not_entrant = true;
1797 break;
1798 case Action_make_not_compilable:
1799 // Give up on compiling this method at all.
1800 make_not_entrant = true; 1801 make_not_compilable = true; 1802 break; 1803 default: 1804 ShouldNotReachHere(); 1805 } 1806 1807 // Setting +ProfileTraps fixes the following, on all platforms: 1808 // 4852688: ProfileInterpreter is off by default for ia64. The result is 1809 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the 1810 // recompile relies on a MethodData* to record heroic opt failures. 1811 1812 // Whether the interpreter is producing MDO data or not, we also need 1813 // to use the MDO to detect hot deoptimization points and control 1814 // aggressive optimization. 1815 bool inc_recompile_count = false; 1816 ProfileData* pdata = NULL; 1817 if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) { 1818 assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity"); 1819 uint this_trap_count = 0; 1820 bool maybe_prior_trap = false; 1821 bool maybe_prior_recompile = false; 1822 pdata = query_update_method_data(trap_mdo, trap_bci, reason, true, 1823 #if INCLUDE_JVMCI 1824 nm->is_compiled_by_jvmci() && nm->is_osr_method(), 1825 #endif 1826 nm->method(), 1827 //outputs: 1828 this_trap_count, 1829 maybe_prior_trap, 1830 maybe_prior_recompile); 1831 // Because the interpreter also counts null, div0, range, and class 1832 // checks, these traps from compiled code are double-counted. 1833 // This is harmless; it just means that the PerXTrapLimit values 1834 // are in effect a little smaller than they look. 1835 1836 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1837 if (per_bc_reason != Reason_none) { 1838 // Now take action based on the partially known per-BCI history. 1839 if (maybe_prior_trap 1840 && this_trap_count >= (uint)PerBytecodeTrapLimit) { 1841 // If there are too many traps at this BCI, force a recompile. 1842 // This will allow the compiler to see the limit overflow, and 1843 // take corrective action, if possible. The compiler generally 1844 // does not use the exact PerBytecodeTrapLimit value, but instead 1845 // changes its tactics if it sees any traps at all. This provides 1846 // a little hysteresis, delaying a recompile until a trap happens 1847 // several times. 1848 // 1849 // Actually, since there is only one bit of counter per BCI, 1850 // the possible per-BCI counts are {0,1,(per-method count)}. 1851 // This produces accurate results if in fact there is only 1852 // one hot trap site, but begins to get fuzzy if there are 1853 // many sites. For example, if there are ten sites each 1854 // trapping two or more times, they each get the blame for 1855 // all of their traps. 1856 make_not_entrant = true; 1857 } 1858 1859 // Detect repeated recompilation at the same BCI, and enforce a limit. 1860 if (make_not_entrant && maybe_prior_recompile) { 1861 // More than one recompile at this point. 1862 inc_recompile_count = maybe_prior_trap; 1863 } 1864 } else { 1865 // For reasons which are not recorded per-bytecode, we simply 1866 // force recompiles unconditionally. 1867 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) 1868 make_not_entrant = true; 1869 } 1870 1871 // Go back to the compiler if there are too many traps in this method. 1872 if (this_trap_count >= per_method_trap_limit(reason)) { 1873 // If there are too many traps in this method, force a recompile. 1874 // This will allow the compiler to see the limit overflow, and 1875 // take corrective action, if possible. 
1876 // (This condition is an unlikely backstop only, because the 1877 // PerBytecodeTrapLimit is more likely to take effect first, 1878 // if it is applicable.) 1879 make_not_entrant = true; 1880 } 1881 1882 // Here's more hysteresis: If there has been a recompile at 1883 // this trap point already, run the method in the interpreter 1884 // for a while to exercise it more thoroughly. 1885 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { 1886 reprofile = true; 1887 } 1888 } 1889 1890 // Take requested actions on the method: 1891 1892 // Recompile 1893 if (make_not_entrant) { 1894 if (!nm->make_not_entrant()) { 1895 return; // the call did not change nmethod's state 1896 } 1897 1898 if (pdata != NULL) { 1899 // Record the recompilation event, if any. 1900 int tstate0 = pdata->trap_state(); 1901 int tstate1 = trap_state_set_recompiled(tstate0, true); 1902 if (tstate1 != tstate0) 1903 pdata->set_trap_state(tstate1); 1904 } 1905 1906 #if INCLUDE_RTM_OPT 1907 // Restart collecting RTM locking abort statistic if the method 1908 // is recompiled for a reason other than RTM state change. 1909 // Assume that in new recompiled code the statistic could be different, 1910 // for example, due to different inlining. 1911 if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && 1912 UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { 1913 trap_mdo->atomic_set_rtm_state(ProfileRTM); 1914 } 1915 #endif 1916 // For code aging we count traps separately here, using make_not_entrant() 1917 // as a guard against simultaneous deopts in multiple threads. 1918 if (reason == Reason_tenured && trap_mdo != NULL) { 1919 trap_mdo->inc_tenure_traps(); 1920 } 1921 } 1922 1923 if (inc_recompile_count) { 1924 trap_mdo->inc_overflow_recompile_count(); 1925 if ((uint)trap_mdo->overflow_recompile_count() > 1926 (uint)PerBytecodeRecompilationCutoff) { 1927 // Give up on the method containing the bad BCI. 1928 if (trap_method() == nm->method()) { 1929 make_not_compilable = true; 1930 } else { 1931 trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff"); 1932 // But give grace to the enclosing nm->method(). 
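// (That is, when the overflowing trap site sits in an inlined callee, only
// that callee loses its top-tier compilability; the root method of this
// nmethod keeps its chance to be recompiled.)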
1933 } 1934 } 1935 } 1936 1937 // Reprofile 1938 if (reprofile) { 1939 CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method()); 1940 } 1941 1942 // Give up compiling 1943 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) { 1944 assert(make_not_entrant, "consistent"); 1945 nm->method()->set_not_compilable(CompLevel_full_optimization); 1946 } 1947 1948 } // Free marked resources 1949 1950 } 1951 JRT_END 1952 1953 ProfileData* 1954 Deoptimization::query_update_method_data(MethodData* trap_mdo, 1955 int trap_bci, 1956 Deoptimization::DeoptReason reason, 1957 bool update_total_trap_count, 1958 #if INCLUDE_JVMCI 1959 bool is_osr, 1960 #endif 1961 Method* compiled_method, 1962 //outputs: 1963 uint& ret_this_trap_count, 1964 bool& ret_maybe_prior_trap, 1965 bool& ret_maybe_prior_recompile) { 1966 bool maybe_prior_trap = false; 1967 bool maybe_prior_recompile = false; 1968 uint this_trap_count = 0; 1969 if (update_total_trap_count) { 1970 uint idx = reason; 1971 #if INCLUDE_JVMCI 1972 if (is_osr) { 1973 idx += Reason_LIMIT; 1974 } 1975 #endif 1976 uint prior_trap_count = trap_mdo->trap_count(idx); 1977 this_trap_count = trap_mdo->inc_trap_count(idx); 1978 1979 // If the runtime cannot find a place to store trap history, 1980 // it is estimated based on the general condition of the method. 1981 // If the method has ever been recompiled, or has ever incurred 1982 // a trap with the present reason , then this BCI is assumed 1983 // (pessimistically) to be the culprit. 1984 maybe_prior_trap = (prior_trap_count != 0); 1985 maybe_prior_recompile = (trap_mdo->decompile_count() != 0); 1986 } 1987 ProfileData* pdata = NULL; 1988 1989 1990 // For reasons which are recorded per bytecode, we check per-BCI data. 1991 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1992 assert(per_bc_reason != Reason_none || update_total_trap_count, "must be"); 1993 if (per_bc_reason != Reason_none) { 1994 // Find the profile data for this BCI. If there isn't one, 1995 // try to allocate one from the MDO's set of spares. 1996 // This will let us detect a repeated trap at this point. 1997 pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL); 1998 1999 if (pdata != NULL) { 2000 if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) { 2001 if (LogCompilation && xtty != NULL) { 2002 ttyLocker ttyl; 2003 // no more room for speculative traps in this MDO 2004 xtty->elem("speculative_traps_oom"); 2005 } 2006 } 2007 // Query the trap state of this profile datum. 2008 int tstate0 = pdata->trap_state(); 2009 if (!trap_state_has_reason(tstate0, per_bc_reason)) 2010 maybe_prior_trap = false; 2011 if (!trap_state_is_recompiled(tstate0)) 2012 maybe_prior_recompile = false; 2013 2014 // Update the trap state of this profile datum. 2015 int tstate1 = tstate0; 2016 // Record the reason. 2017 tstate1 = trap_state_add_reason(tstate1, per_bc_reason); 2018 // Store the updated state on the MDO, for next time. 2019 if (tstate1 != tstate0) 2020 pdata->set_trap_state(tstate1); 2021 } else { 2022 if (LogCompilation && xtty != NULL) { 2023 ttyLocker ttyl; 2024 // Missing MDP? Leave a small complaint in the log. 
2025 xtty->elem("missing_mdp bci='%d'", trap_bci); 2026 } 2027 } 2028 } 2029 2030 // Return results: 2031 ret_this_trap_count = this_trap_count; 2032 ret_maybe_prior_trap = maybe_prior_trap; 2033 ret_maybe_prior_recompile = maybe_prior_recompile; 2034 return pdata; 2035 } 2036 2037 void 2038 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) { 2039 ResourceMark rm; 2040 // Ignored outputs: 2041 uint ignore_this_trap_count; 2042 bool ignore_maybe_prior_trap; 2043 bool ignore_maybe_prior_recompile; 2044 assert(!reason_is_speculate(reason), "reason speculate only used by compiler"); 2045 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts 2046 bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true); 2047 query_update_method_data(trap_mdo, trap_bci, 2048 (DeoptReason)reason, 2049 update_total_counts, 2050 #if INCLUDE_JVMCI 2051 false, 2052 #endif 2053 NULL, 2054 ignore_this_trap_count, 2055 ignore_maybe_prior_trap, 2056 ignore_maybe_prior_recompile); 2057 } 2058 2059 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) { 2060 if (TraceDeoptimization) { 2061 tty->print("Uncommon trap "); 2062 } 2063 // Still in Java no safepoints 2064 { 2065 // This enters VM and may safepoint 2066 uncommon_trap_inner(thread, trap_request); 2067 } 2068 return fetch_unroll_info_helper(thread, exec_mode); 2069 } 2070 2071 // Local derived constants. 2072 // Further breakdown of DataLayout::trap_state, as promised by DataLayout. 2073 const int DS_REASON_MASK = DataLayout::trap_mask >> 1; 2074 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK; 2075 2076 //---------------------------trap_state_reason--------------------------------- 2077 Deoptimization::DeoptReason 2078 Deoptimization::trap_state_reason(int trap_state) { 2079 // This assert provides the link between the width of DataLayout::trap_bits 2080 // and the encoding of "recorded" reasons. It ensures there are enough 2081 // bits to store all needed reasons in the per-BCI MDO profile. 
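// As an illustration (the real width is fixed by DataLayout::trap_bits): with
// an 8-bit trap_state, DS_REASON_MASK would be 0x7f and DS_RECOMPILE_BIT 0x80,
// so a state of (DS_RECOMPILE_BIT | Reason_class_check) decodes back to the
// class_check reason with the recompiled flag set, while a reason field equal
// to DS_REASON_MASK itself reads as Reason_many.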
2082 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2083 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2084 trap_state -= recompile_bit; 2085 if (trap_state == DS_REASON_MASK) { 2086 return Reason_many; 2087 } else { 2088 assert((int)Reason_none == 0, "state=0 => Reason_none"); 2089 return (DeoptReason)trap_state; 2090 } 2091 } 2092 //-------------------------trap_state_has_reason------------------------------- 2093 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2094 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); 2095 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2096 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2097 trap_state -= recompile_bit; 2098 if (trap_state == DS_REASON_MASK) { 2099 return -1; // true, unspecifically (bottom of state lattice) 2100 } else if (trap_state == reason) { 2101 return 1; // true, definitely 2102 } else if (trap_state == 0) { 2103 return 0; // false, definitely (top of state lattice) 2104 } else { 2105 return 0; // false, definitely 2106 } 2107 } 2108 //-------------------------trap_state_add_reason------------------------------- 2109 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { 2110 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); 2111 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2112 trap_state -= recompile_bit; 2113 if (trap_state == DS_REASON_MASK) { 2114 return trap_state + recompile_bit; // already at state lattice bottom 2115 } else if (trap_state == reason) { 2116 return trap_state + recompile_bit; // the condition is already true 2117 } else if (trap_state == 0) { 2118 return reason + recompile_bit; // no condition has yet been true 2119 } else { 2120 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom 2121 } 2122 } 2123 //-----------------------trap_state_is_recompiled------------------------------ 2124 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2125 return (trap_state & DS_RECOMPILE_BIT) != 0; 2126 } 2127 //-----------------------trap_state_set_recompiled----------------------------- 2128 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { 2129 if (z) return trap_state | DS_RECOMPILE_BIT; 2130 else return trap_state & ~DS_RECOMPILE_BIT; 2131 } 2132 //---------------------------format_trap_state--------------------------------- 2133 // This is used for debugging and diagnostics, including LogFile output. 2134 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2135 int trap_state) { 2136 assert(buflen > 0, "sanity"); 2137 DeoptReason reason = trap_state_reason(trap_state); 2138 bool recomp_flag = trap_state_is_recompiled(trap_state); 2139 // Re-encode the state from its decoded components. 2140 int decoded_state = 0; 2141 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many) 2142 decoded_state = trap_state_add_reason(decoded_state, reason); 2143 if (recomp_flag) 2144 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag); 2145 // If the state re-encodes properly, format it symbolically. 2146 // Because this routine is used for debugging and diagnostics, 2147 // be robust even if the state is a strange value. 2148 size_t len; 2149 if (decoded_state != trap_state) { 2150 // Random buggy state that doesn't decode?? 
2151 len = jio_snprintf(buf, buflen, "#%d", trap_state); 2152 } else { 2153 len = jio_snprintf(buf, buflen, "%s%s", 2154 trap_reason_name(reason), 2155 recomp_flag ? " recompiled" : ""); 2156 } 2157 return buf; 2158 } 2159 2160 2161 //--------------------------------statics-------------------------------------- 2162 const char* Deoptimization::_trap_reason_name[] = { 2163 // Note: Keep this in sync. with enum DeoptReason. 2164 "none", 2165 "null_check", 2166 "null_assert" JVMCI_ONLY("_or_unreached0"), 2167 "range_check", 2168 "class_check", 2169 "array_check", 2170 "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"), 2171 "bimorphic" JVMCI_ONLY("_or_optimized_type_check"), 2172 "unloaded", 2173 "uninitialized", 2174 "unreached", 2175 "unhandled", 2176 "constraint", 2177 "div0_check", 2178 "age", 2179 "predicate", 2180 "loop_limit_check", 2181 "speculate_class_check", 2182 "speculate_null_check", 2183 "rtm_state_change", 2184 "unstable_if", 2185 "unstable_fused_if", 2186 #if INCLUDE_JVMCI 2187 "aliasing", 2188 "transfer_to_interpreter", 2189 "not_compiled_exception_handler", 2190 "unresolved", 2191 "jsr_mismatch", 2192 #endif 2193 "tenured" 2194 }; 2195 const char* Deoptimization::_trap_action_name[] = { 2196 // Note: Keep this in sync. with enum DeoptAction. 2197 "none", 2198 "maybe_recompile", 2199 "reinterpret", 2200 "make_not_entrant", 2201 "make_not_compilable" 2202 }; 2203 2204 const char* Deoptimization::trap_reason_name(int reason) { 2205 // Check that every reason has a name 2206 STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT); 2207 2208 if (reason == Reason_many) return "many"; 2209 if ((uint)reason < Reason_LIMIT) 2210 return _trap_reason_name[reason]; 2211 static char buf[20]; 2212 sprintf(buf, "reason%d", reason); 2213 return buf; 2214 } 2215 const char* Deoptimization::trap_action_name(int action) { 2216 // Check that every action has a name 2217 STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT); 2218 2219 if ((uint)action < Action_LIMIT) 2220 return _trap_action_name[action]; 2221 static char buf[20]; 2222 sprintf(buf, "action%d", action); 2223 return buf; 2224 } 2225 2226 // This is used for debugging and diagnostics, including LogFile output. 
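// For example, a trap_request that encodes reason 'class_check' and action
// 'maybe_recompile' with no unloaded class index formats roughly as
//   reason='class_check' action='maybe_recompile'
// An index='...' attribute is appended when a constant pool index is present,
// and JVMCI builds add a debug_id='...' attribute.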
2227 const char* Deoptimization::format_trap_request(char* buf, size_t buflen, 2228 int trap_request) { 2229 jint unloaded_class_index = trap_request_index(trap_request); 2230 const char* reason = trap_reason_name(trap_request_reason(trap_request)); 2231 const char* action = trap_action_name(trap_request_action(trap_request)); 2232 #if INCLUDE_JVMCI 2233 int debug_id = trap_request_debug_id(trap_request); 2234 #endif 2235 size_t len; 2236 if (unloaded_class_index < 0) { 2237 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"), 2238 reason, action 2239 #if INCLUDE_JVMCI 2240 ,debug_id 2241 #endif 2242 ); 2243 } else { 2244 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"), 2245 reason, action, unloaded_class_index 2246 #if INCLUDE_JVMCI 2247 ,debug_id 2248 #endif 2249 ); 2250 } 2251 return buf; 2252 } 2253 2254 juint Deoptimization::_deoptimization_hist 2255 [Deoptimization::Reason_LIMIT] 2256 [1 + Deoptimization::Action_LIMIT] 2257 [Deoptimization::BC_CASE_LIMIT] 2258 = {0}; 2259 2260 enum { 2261 LSB_BITS = 8, 2262 LSB_MASK = right_n_bits(LSB_BITS) 2263 }; 2264 2265 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, 2266 Bytecodes::Code bc) { 2267 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2268 assert(action >= 0 && action < Action_LIMIT, "oob"); 2269 _deoptimization_hist[Reason_none][0][0] += 1; // total 2270 _deoptimization_hist[reason][0][0] += 1; // per-reason total 2271 juint* cases = _deoptimization_hist[reason][1+action]; 2272 juint* bc_counter_addr = NULL; 2273 juint bc_counter = 0; 2274 // Look for an unused counter, or an exact match to this BC. 2275 if (bc != Bytecodes::_illegal) { 2276 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { 2277 juint* counter_addr = &cases[bc_case]; 2278 juint counter = *counter_addr; 2279 if ((counter == 0 && bc_counter_addr == NULL) 2280 || (Bytecodes::Code)(counter & LSB_MASK) == bc) { 2281 // this counter is either free or is already devoted to this BC 2282 bc_counter_addr = counter_addr; 2283 bc_counter = counter | bc; 2284 } 2285 } 2286 } 2287 if (bc_counter_addr == NULL) { 2288 // Overflow, or no given bytecode. 2289 bc_counter_addr = &cases[BC_CASE_LIMIT-1]; 2290 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB 2291 } 2292 *bc_counter_addr = bc_counter + (1 << LSB_BITS); 2293 } 2294 2295 jint Deoptimization::total_deoptimization_count() { 2296 return _deoptimization_hist[Reason_none][0][0]; 2297 } 2298 2299 jint Deoptimization::deoptimization_count(DeoptReason reason) { 2300 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2301 return _deoptimization_hist[reason][0][0]; 2302 } 2303 2304 void Deoptimization::print_statistics() { 2305 juint total = total_deoptimization_count(); 2306 juint account = total; 2307 if (total != 0) { 2308 ttyLocker ttyl; 2309 if (xtty != NULL) xtty->head("statistics type='deoptimization'"); 2310 tty->print_cr("Deoptimization traps recorded:"); 2311 #define PRINT_STAT_LINE(name, r) \ 2312 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); 2313 PRINT_STAT_LINE("total", total); 2314 // For each non-zero entry in the histogram, print the reason, 2315 // the action, and (if specifically known) the type of bytecode. 
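// Each histogram cell packs the bytecode into its low LSB_BITS bits and the
// event count into the remaining high bits; for instance, a cell holding
// (5 << LSB_BITS) | Bytecodes::_ifeq is reported as five traps attributed to
// the ifeq bytecode for that reason/action pair.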
2316 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2317 for (int action = 0; action < Action_LIMIT; action++) {
2318 juint* cases = _deoptimization_hist[reason][1+action];
2319 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2320 juint counter = cases[bc_case];
2321 if (counter != 0) {
2322 char name[1*K];
2323 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2324 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2325 bc = Bytecodes::_illegal;
2326 sprintf(name, "%s/%s/%s",
2327 trap_reason_name(reason),
2328 trap_action_name(action),
2329 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2330 juint r = counter >> LSB_BITS;
2331 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2332 account -= r;
2333 }
2334 }
2335 }
2336 }
2337 if (account != 0) {
2338 PRINT_STAT_LINE("unaccounted", account);
2339 }
2340 #undef PRINT_STAT_LINE
2341 if (xtty != NULL) xtty->tail("statistics");
2342 }
2343 }
2344 #else // COMPILER2 || SHARK || INCLUDE_JVMCI
2345
2346
2347 // Stubs for C1-only system.
2348 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2349 return false;
2350 }
2351
2352 const char* Deoptimization::trap_reason_name(int reason) {
2353 return "unknown";
2354 }
2355
2356 void Deoptimization::print_statistics() {
2357 // no output
2358 }
2359
2360 void
2361 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2362 // no update
2363 }
2364
2365 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2366 return 0;
2367 }
2368
2369 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2370 Bytecodes::Code bc) {
2371 // no update
2372 }
2373
2374 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2375 int trap_state) {
2376 jio_snprintf(buf, buflen, "#%d", trap_state);
2377 return buf;
2378 }
2379
2380 #endif // COMPILER2 || SHARK || INCLUDE_JVMCI