/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"


bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  caller_actual_parameters,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type,
                                         int exec_mode) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type               = return_type;
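  // Note: _initial_info is filled in later via set_initial_info() once the
  // caller frame has been examined; the *_temp fields below are scratch
  // slots used by the platform-specific unpack code.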
  _initial_info              = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = exec_mode;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
  assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
}


Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}



int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print(INTX_FORMAT " ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  if (TraceDeoptimization) {
    tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
  }
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread, exec_mode);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
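  // (Were a safepoint taken, the Java state captured in the vframeArray
  // would be missed by the collector; see the NoSafepointVerifier placed
  // further down, once rematerialized objects have been written back.)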
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  thread->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame. This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

  bool realloc_failures = false;

#if COMPILER2_OR_JVMCI
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
#if !INCLUDE_JVMCI
  if (DoEscapeAnalysis || EliminateNestedLocks) {
    if (EliminateAllocations) {
#endif // INCLUDE_JVMCI
      assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return an oop
      // in compiled code. Such sites include java method calls,
      // runtime calls (for example, used to allocate new objects/arrays
      // on slow code path) and any other calls generated in compiled code.
      // It is not guaranteed that we can get such information here only
      // by analyzing bytecode in deoptimized frames. This is why this flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      // If the previous frame was popped or if we are dispatching an exception,
      // we don't have an oop result.
      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // a call which returns an oop, we need to save it since it is not in the oopmap.
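        // saved_oop_result() reads the return-register slot recorded in the
        // RegisterMap; the Handle keeps the oop alive across the reallocation
        // below, which may trigger a GC.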
        oop result = deoptee.saved_oop_result(&map);
        assert(oopDesc::is_oop_or_null(result), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
        }
      }
      if (objects != NULL) {
        JRT_BLOCK
          realloc_failures = realloc_objects(thread, &deoptee, &map, objects, THREAD);
        JRT_END
        bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
        reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
          print_objects(objects, realloc_failures);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
#if !INCLUDE_JVMCI
    }
    if (EliminateLocks) {
#endif // INCLUDE_JVMCI
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert(cvf->scope() != NULL, "expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread, realloc_failures);
#ifndef PRODUCT
          if (PrintDeoptimizationDetails) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
                }
                if (mi->owner_is_scalar_replaced()) {
                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
                } else {
                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
                }
              }
            }
          }
#endif // !PRODUCT
        }
      }
#if !INCLUDE_JVMCI
    }
  }
#endif // INCLUDE_JVMCI
#endif // COMPILER2_OR_JVMCI

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != NULL, "exception oop can not be null");
  }

  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
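  // NoSafepointVerifier asserts, in debug builds, if this thread reaches a
  // possible safepoint before the verifier goes out of scope.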
  NoSafepointVerifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    pop_frames_failed_reallocs(thread, array);
  }
#endif

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local
  // writes added by JVMTI then we can free up that structure, as the data is
  // now in the vframeArray.
  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }
  }

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() ||
         cb->is_uncommon_trap_stub() ||
         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
         "unexpected code blob: %s", cb->name());
#endif

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
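  // That extra slot, frame_pcs[number_of_frames], is initialized just below
  // with the interpreter's deopt entry point.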
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different from the number of arguments in the callee when method
  // handles are used. If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method = deopt_sender.interpreter_frame_method();
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller). Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // So things look a little strange in this loop.
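  // For example, with three virtual frames, array->element(0) (the youngest)
  // fills slot number_of_frames - 1, while array->element(2) (the root)
  // fills slot 0.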
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized then we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.
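  // raw_pc() returns the deopt handler's entry point if the sender has
  // itself been deoptimized, and the ordinary pc otherwise.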
  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

#if INCLUDE_JVMCI
  if (exceptionObject() != NULL) {
    thread->set_exception_oop(exceptionObject());
    exec_mode = Unpack_exception;
  }
#endif

  if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(thread->has_pending_exception(), "should have thrown OOME");
    thread->set_exception_oop(thread->pending_exception());
    thread->clear_pending_exception();
    exec_mode = Unpack_exception;
  }

#if INCLUDE_JVMCI
  if (thread->frames_to_pop_failed_realloc() > 0) {
    thread->set_pending_monitorenter(false);
  }
#endif

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_compiled_method(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
      thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}

// Moved from cpu directories because none of the cpus has callee save values.
// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When we deoptimize a situation where a
  // compiled caller calls a compiled callee, the caller will have registers it
  // expects to survive the call to the callee. If we deoptimize the callee the
  // only way we can restore these registers is to have the oldest interpreter
  // frame that we create restore these values. That is what this routine
  // will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark, so any ResourceObj's
  // we allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
                  p2i(thread), p2i(array), exec_mode);
  }
#endif
  Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
              p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.
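  // (The unpack blob preserves the return value according to the claimed
  // type, so T_OBJECT keeps the register carrying the exception oop intact.)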
  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;
    // Clear pending exception to not break verification code (restored afterwards)
    PRESERVE_EXCEPTION_MARK;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh, iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (Bytecodes::is_invoke(cur_code)) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        cur_invoke_parameter_size = invoke.size_of_parameters();
        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
          callee_size_of_parameters++;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code next_code = str.next();
        if (next_code >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls.
          // It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
            // Get expression stack size for the next bytecode
            InterpreterOopMap next_mask;
            OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
            next_mask_expression_stack_size = next_mask.expression_stack_size();
            if (Bytecodes::is_invoke(next_code)) {
              Bytecode_invoke invoke(mh, str.bci());
              next_mask_expression_stack_size += invoke.size_of_parameters();
            }
            // Need to subtract off the size of the result type of
            // the bytecode because this is not described in the
            // debug info but returned to the interpreter in the TOS
            // caching register
            BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
            if (bytecode_result_type != T_ILLEGAL) {
              top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
            }
            assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be non-negative");
            try_next_mask = true;
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
        /* SPARC */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
        /* x86 */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
        (try_next_mask &&
         (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                top_frame_expression_stack_adjustment))) ||
        (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
        (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
         (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
        )) {
        {
          ttyLocker ttyl;

          // Print out some information that will help us debug the problem
          tty->print_cr("Wrong number of expression stack elements during deoptimization");
          tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
          tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                        iframe->interpreter_frame_expression_stack_size());
          tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
          tty->print_cr("  try_next_mask = %d", try_next_mask);
          tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
          tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
          tty->print_cr("  callee_max_locals = %d", callee_max_locals);
          tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
          tty->print_cr("  exec_mode = %d", exec_mode);
          tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
          tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
          tty->print_cr("  Interpreted frames:");
          for (int k = 0; k < cur_array->frames(); k++) {
            vframeArrayElement* el = cur_array->element(k);
            tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(),
                          el->bci());
          }
          cur_array->print_on_2(tty);
        } // release tty lock before calling guarantee
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END

class DeoptimizeMarkedTC : public ThreadClosure {
  bool _in_handshake;
 public:
  DeoptimizeMarkedTC(bool in_handshake) : _in_handshake(in_handshake) {}
  virtual void do_thread(Thread* thread) {
    assert(thread->is_Java_thread(), "must be");
    JavaThread* jt = (JavaThread*)thread;
    jt->deoptimize_marked_methods(_in_handshake);
  }
};

void Deoptimization::deoptimize_all_marked() {
  ResourceMark rm;
  DeoptimizationMarker dm;

  if (SafepointSynchronize::is_at_safepoint()) {
    DeoptimizeMarkedTC deopt(false);
    // Make the dependent methods not entrant
    CodeCache::make_marked_nmethods_not_entrant();
    Threads::java_threads_do(&deopt);
  } else {
    // Make the dependent methods not entrant
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CodeCache::make_marked_nmethods_not_entrant();
    }
    DeoptimizeMarkedTC deopt(true);
    Handshake::execute(&deopt);
  }
}

Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;



#if INCLUDE_JVMCI || INCLUDE_AOT
template<typename CacheType>
class BoxCacheBase : public CHeapObj<mtCompiler> {
protected:
  static InstanceKlass* find_cache_klass(Symbol* klass_name, TRAPS) {
    ResourceMark rm;
    char* klass_name_str = klass_name->as_C_string();
    Klass* k = SystemDictionary::find(klass_name, Handle(), Handle(), THREAD);
    guarantee(k != NULL, "%s must be loaded", klass_name_str);
    InstanceKlass* ik = InstanceKlass::cast(k);
    guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
    CacheType::compute_offsets(ik);
    return ik;
  }
};

template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache : public BoxCacheBase<CacheType> {
  PrimitiveType _low;
  PrimitiveType _high;
  jobject _cache;
protected:
  static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
  BoxCache(Thread* thread) {
    InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(CacheType::symbol(), thread);
    objArrayOop cache = CacheType::cache(ik);
    assert(cache->length() > 0, "Empty cache");
    _low = BoxType::value(cache->obj_at(0));
    _high = _low + cache->length() - 1;
    _cache = JNIHandles::make_global(Handle(thread, cache));
  }
  ~BoxCache() {
    JNIHandles::destroy_global(_cache);
  }
public:
  static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
    if (_singleton == NULL) {
      BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
      if (!Atomic::replace_if_null(s, &_singleton)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup(PrimitiveType value) {
    if (_low <= value && value <= _high) {
      int offset = value - _low;
      return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
    }
    return NULL;
  }
  oop lookup_raw(intptr_t raw_value) {
    // Have to cast to avoid little/big-endian
    // problems.
    if (sizeof(PrimitiveType) > sizeof(jint)) {
      jlong value = (jlong)raw_value;
      return lookup(value);
    }
    PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
    return lookup(value);
  }
};

typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;

template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;

class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
  jobject _true_cache;
  jobject _false_cache;
protected:
  static BooleanBoxCache *_singleton;
  BooleanBoxCache(Thread *thread) {
    InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol(), thread);
    _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
    _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
  }
  ~BooleanBoxCache() {
    JNIHandles::destroy_global(_true_cache);
    JNIHandles::destroy_global(_false_cache);
  }
public:
  static BooleanBoxCache* singleton(Thread* thread) {
    if (_singleton == NULL) {
      BooleanBoxCache* s = new BooleanBoxCache(thread);
      if (!Atomic::replace_if_null(s, &_singleton)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup_raw(intptr_t raw_value) {
    // Have to cast to avoid little/big-endian problems.
    jboolean value = (jboolean)*((jint*)&raw_value);
    return lookup(value);
  }
  oop lookup(jboolean value) {
    if (value != 0) {
      return JNIHandles::resolve_non_null(_true_cache);
    }
    return JNIHandles::resolve_non_null(_false_cache);
  }
};

BooleanBoxCache* BooleanBoxCache::_singleton = NULL;

oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
  Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
  BasicType box_type = SystemDictionary::box_klass_type(k);
  if (box_type != T_OBJECT) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ?
                                                                                 1 : 0));
    switch(box_type) {
      case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
      default:;
    }
  }
  return NULL;
}
#endif // INCLUDE_JVMCI || INCLUDE_AOT

#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    oop obj = NULL;

    if (k->is_instance_klass()) {
#if INCLUDE_JVMCI || INCLUDE_AOT
      CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
      if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
        AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
        obj = get_cached_box(abv, fr, reg_map, THREAD);
        if (obj != NULL) {
          // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
          abv->set_cached(true);
        }
      }
#endif // INCLUDE_JVMCI || INCLUDE_AOT
      InstanceKlass* ik = InstanceKlass::cast(k);
      if (obj == NULL) {
        obj = ik->allocate_instance(THREAD);
      }
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: { // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      bool big_value = false;
      if (i + 1 < sv->field_size() && type == T_INT) {
        if (sv->field_at(i)->is_location()) {
          Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
          if (type == Location::dbl || type == Location::lng) {
            big_value = true;
          }
        } else if (sv->field_at(i)->is_constant_int()) {
          ScopeValue* next_scope_field = sv->field_at(i + 1);
          if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
            big_value = true;
          }
        }
      }

      if (big_value) {
        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->int_at_put(index, (jint)*((jint*)&res));
        obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
      } else {
        val = value->get_int();
        obj->int_at_put(index, (jint)*((jint*)&val));
      }
      break;
    }

    case T_SHORT:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_CHAR:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->char_at_put(index, (jchar)*((jint*)&val));
      break;

    case T_BYTE:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->byte_at_put(index, (jbyte)*((jint*)&val));
      break;

    case T_BOOLEAN:
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}

class ReassignedField {
public:
  int _offset;
  BasicType _type;
public:
  ReassignedField() {
    _offset = 0;
    _type = T_ILLEGAL;
  }
};

int compare(ReassignedField* left, ReassignedField* right) {
  return left->_offset - right->_offset;
}

// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
  if (klass->superklass() != NULL) {
    svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
  }

  GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
  for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
      ReassignedField field;
      field._offset = fs.offset();
      field._type = FieldType::basic_type(fs.signature());
      fields->append(field);
    }
  }
  fields->sort(compare);
  for (int i = 0; i < fields->length(); i++) {
    intptr_t val;
    ScopeValue* scope_field = sv->field_at(svIndex);
    StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
    int offset = fields->at(i)._offset;
    BasicType type = fields->at(i)._type;
    switch (type) {
      case T_OBJECT: case T_ARRAY:
        assert(value->type() == T_OBJECT, "Agreement.");
        obj->obj_field_put(offset, value->get_obj()());
        break;

      // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
      case T_INT: case T_FLOAT: { // 4 bytes.
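        // A T_INT field can hold one half of a scalar-replaced long/double.
        // The big_value logic below detects that case, consumes two scope
        // entries, and falls through to the T_LONG/T_DOUBLE case.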
        assert(value->type() == T_INT, "Agreement.");
        bool big_value = false;
        if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
          if (scope_field->is_location()) {
            Location::Type type = ((LocationValue*) scope_field)->location().type();
            if (type == Location::dbl || type == Location::lng) {
              big_value = true;
            }
          }
          if (scope_field->is_constant_int()) {
            ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
              big_value = true;
            }
          }
        }

        if (big_value) {
          i++;
          assert(i < fields->length(), "second T_INT field needed");
          assert(fields->at(i)._type == T_INT, "T_INT field needed");
        } else {
          val = value->get_int();
          obj->int_field_put(offset, (jint)*((jint*)&val));
          break;
        }
      }
        /* no break */

      case T_LONG: case T_DOUBLE: {
        assert(value->type() == T_INT, "Agreement.");
        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
#ifdef _LP64
        jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
        // For SPARC we have to swap high and low words.
        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
        obj->long_field_put(offset, res);
        break;
      }

      case T_SHORT:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->short_field_put(offset, (jshort)*((jint*)&val));
        break;

      case T_CHAR:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->char_field_put(offset, (jchar)*((jint*)&val));
        break;

      case T_BYTE:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->byte_field_put(offset, (jbyte)*((jint*)&val));
        break;

      case T_BOOLEAN:
        assert(value->type() == T_INT, "Agreement.");
        val = value->get_int();
        obj->bool_field_put(offset, (jboolean)*((jint*)&val));
        break;

      default:
        ShouldNotReachHere();
    }
    svIndex++;
  }
  return svIndex;
}

// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
    if (obj.is_null()) {
      continue;
    }
#if INCLUDE_JVMCI || INCLUDE_AOT
    // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
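    // (is_cached() was set in realloc_objects() when get_cached_box()
    // returned a shared boxed instance instead of allocating a fresh one.)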
    if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
      continue;
    }
#endif // INCLUDE_JVMCI || INCLUDE_AOT
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
      if (!mon_info->owner_is_scalar_replaced()) {
        Handle obj(thread, mon_info->owner());
        markWord mark = obj->mark();
        if (UseBiasedLocking && mark.has_bias_pattern()) {
          // Newly allocated objects may have the mark set to anonymously biased.
          // Also the deoptimized method may have called methods with synchronization
          // where the thread-local object is bias locked to the current thread.
          assert(mark.is_biased_anonymously() ||
                 mark.biased_locker() == thread, "should be locked to current thread");
          // Reset mark word to unbiased prototype.
          markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
          obj->set_mark(unbiased_prototype);
        }
        BasicLock* lock = mon_info->lock();
        ObjectSynchronizer::enter(obj, lock, thread);
        assert(mon_info->owner()->is_locked(), "object must be locked now");
      }
    }
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
    k->print_value();
    assert(obj.not_null() || realloc_failures, "reallocation was missed");
    if (obj.is_null()) {
      tty->print(" allocation failed");
    } else {
      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    }
    tty->cr();

    if (Verbose && !obj.is_null()) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, p2i(array));
  }
#endif // PRODUCT

  return array;
}

#if COMPILER2_OR_JVMCI
void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
  // Reallocation of some scalar replaced objects failed. Record
  // that we need to pop all the interpreter frames for the
  // deoptimized compiled frame.
  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
  thread->set_frames_to_pop_failed_realloc(array->frames());
  // Unlock all monitors here otherwise the interpreter will see a
  // mix of locked and unlocked monitors (because of failed
  // reallocations of synchronized objects) and be confused.
  for (int i = 0; i < array->frames(); i++) {
    MonitorChunk* monitors = array->element(i)->monitors();
    if (monitors != NULL) {
      for (int j = 0; j < monitors->number_of_monitors(); j++) {
        BasicObjectLock* src = monitors->at(j);
        if (src->obj() != NULL) {
          ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
        }
      }
      array->element(i)->free_monitors(thread);
#ifdef ASSERT
      array->element(i)->set_removed_monitors();
#endif
    }
  }
}
#endif

static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  Thread* thread = Thread::current();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(thread, mon_info->owner()));
    }
  }
}

static void get_monitors_from_stack(GrowableArray<Handle>* objects_to_revoke, JavaThread* thread, frame fr, RegisterMap* map) {
  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
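  // StackFrameStream walks from the thread's last frame outward, keeping its
  // RegisterMap current, until it has passed the frame being deoptimized.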
1429   if (map == NULL || !map->update_map()) {
1430     StackFrameStream sfs(thread, true);
1431     bool found = false;
1432     while (!found && !sfs.is_done()) {
1433       frame* cur = sfs.current();
1434       sfs.next();
1435       found = cur->id() == fr.id();
1436     }
1437     assert(found, "frame to be deoptimized not found on target thread's stack");
1438     map = sfs.register_map();
1439   }
1440
1441   vframe* vf = vframe::new_vframe(&fr, map, thread);
1442   compiledVFrame* cvf = compiledVFrame::cast(vf);
1443   // Revoke monitors' biases in all scopes.
1444   while (!cvf->is_top()) {
1445     collect_monitors(cvf, objects_to_revoke);
1446     cvf = compiledVFrame::cast(cvf->sender());
1447   }
1448   collect_monitors(cvf, objects_to_revoke);
1449 }
1450
1451 void Deoptimization::revoke_using_safepoint(JavaThread* thread, frame fr, RegisterMap* map) {
1452   if (!UseBiasedLocking) {
1453     return;
1454   }
1455   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1456   get_monitors_from_stack(objects_to_revoke, thread, fr, map);
1457
1458   if (SafepointSynchronize::is_at_safepoint()) {
1459     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1460   } else {
1461     BiasedLocking::revoke(objects_to_revoke, thread);
1462   }
1463 }
1464
1465 void Deoptimization::revoke_using_handshake(JavaThread* thread, frame fr, RegisterMap* map) {
1466   if (!UseBiasedLocking) {
1467     return;
1468   }
1469   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1470   get_monitors_from_stack(objects_to_revoke, thread, fr, map);
1471
1472   int len = objects_to_revoke->length();
1473   for (int i = 0; i < len; i++) {
1474     oop obj = (objects_to_revoke->at(i))();
1475     BiasedLocking::revoke_own_locks_in_handshake(objects_to_revoke->at(i), thread);
1476     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
1477   }
1478 }
1479
1480
1481 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1482   assert(fr.can_be_deoptimized(), "checking frame type");
1483
1484   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1485
1486   if (LogCompilation && xtty != NULL) {
1487     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1488     assert(cm != NULL, "only compiled methods can deopt");
1489
1490     ttyLocker ttyl;
1491     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'", (uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1492     cm->log_identity(xtty);
1493     xtty->end_head();
1494     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1495       xtty->begin_elem("jvms bci='%d'", sd->bci());
1496       xtty->method(sd->method());
1497       xtty->end_elem();
1498       if (sd->is_top()) break;
1499     }
1500     xtty->tail("deoptimized");
1501   }
1502
1503   // Patch the compiled method so that when execution returns to it we will
1504   // deopt the execution state and return to the interpreter.
1505   fr.deoptimize(thread);
1506 }
1507
1508 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, bool in_handshake) {
1509   deopt_thread(in_handshake, thread, fr, map, Reason_constraint);
1510 }
1511
1512 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1513   deopt_thread(false, thread, fr, map, reason);
1514 }
1515
1516 void Deoptimization::deopt_thread(bool in_handshake, JavaThread* thread,
1517                                   frame fr, RegisterMap *map, DeoptReason reason) {
1518   // Deoptimize only if the frame comes from compiled code.
1519   // Do not deoptimize a frame that has already been patched
1520   // during execution of the loops below.
1521   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1522     return;
1523   }
1524   ResourceMark rm;
1525   DeoptimizationMarker dm;
1526   if (UseBiasedLocking) {
1527     if (in_handshake) {
1528       revoke_using_handshake(thread, fr, map);
1529     } else {
1530       revoke_using_safepoint(thread, fr, map);
1531     }
1532   }
1533   deoptimize_single_frame(thread, fr, reason);
1534
1535 }
1536
1537 #if INCLUDE_JVMCI
1538 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1539   // There is no exception handler for this pc => deoptimize.
1540   cm->make_not_entrant();
1541
1542   // Use Deoptimization::deoptimize for all of its side effects:
1543   // revoking biases of monitors, gathering trap statistics, logging...
1544   // It also patches the return pc, but we do not care about that
1545   // since we return a continuation to the deopt_blob below.
1546   JavaThread* thread = JavaThread::current();
1547   RegisterMap reg_map(thread, UseBiasedLocking);
1548   frame runtime_frame = thread->last_frame();
1549   frame caller_frame = runtime_frame.sender(&reg_map);
1550   assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
1551   Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
1552
1553   MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
1554   if (trap_mdo != NULL) {
1555     trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
1556   }
1557
1558   return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1559 }
1560 #endif
1561
1562 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1563   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
1564          "can only deoptimize other thread at a safepoint");
1565   // Compute frame and register map based on thread and sp.
1566   RegisterMap reg_map(thread, UseBiasedLocking);
1567   frame fr = thread->last_frame();
1568   while (fr.id() != id) {
1569     fr = fr.sender(&reg_map);
1570   }
1571   deoptimize(thread, fr, &reg_map, reason);
1572 }
1573
1574
1575 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1576   if (thread == Thread::current()) {
1577     Deoptimization::deoptimize_frame_internal(thread, id, reason);
1578   } else {
1579     VM_DeoptimizeFrame deopt(thread, id, reason);
1580     VMThread::execute(&deopt);
1581   }
1582 }
1583
1584 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1585   deoptimize_frame(thread, id, Reason_constraint);
1586 }
1587
1588 // JVMTI PopFrame support
1589 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1590 {
1591   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1592 }
1593 JRT_END
1594
1595 MethodData*
1596 Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
1597                                 bool create_if_missing) {
1598   Thread* THREAD = thread;
1599   MethodData* mdo = m()->method_data();
1600   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1601     // Build an MDO. Ignore errors like OutOfMemory;
1602     // that simply means we won't have an MDO to update.
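// (Editor's note -- illustrative usage, mirroring
//  deoptimize_for_missing_exception_handler() above: callers that want an
//  MDO pass create_if_missing = true and still null-check the result,
//
//    MethodData* mdo = get_method_data(thread, mh, true);
//    if (mdo != NULL) {
//      mdo->inc_trap_count(reason);
//    }
//
//  since an OutOfMemoryError during MDO allocation leaves mdo == NULL.)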
1603     Method::build_interpreter_method_data(m, THREAD);
1604     if (HAS_PENDING_EXCEPTION) {
1605       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1606       CLEAR_PENDING_EXCEPTION;
1607     }
1608     mdo = m()->method_data();
1609   }
1610   return mdo;
1611 }
1612
1613 #if COMPILER2_OR_JVMCI
1614 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
1615   // In case of an unresolved klass entry, load the class.
1616   if (constant_pool->tag_at(index).is_unresolved_klass()) {
1617     Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
1618     return;
1619   }
1620
1621   if (!constant_pool->tag_at(index).is_symbol()) return;
1622
1623   Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
1624   Symbol* symbol = constant_pool->symbol_at(index);
1625
1626   // class name?
1627   if (symbol->char_at(0) != '(') {
1628     Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1629     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1630     return;
1631   }
1632
1633   // then it must be a signature!
1634   ResourceMark rm(THREAD);
1635   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1636     if (ss.is_object()) {
1637       Symbol* class_name = ss.as_symbol();
1638       Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1639       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1640     }
1641   }
1642 }
1643
1644
1645 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
1646   EXCEPTION_MARK;
1647   load_class_by_index(constant_pool, index, THREAD);
1648   if (HAS_PENDING_EXCEPTION) {
1649     // An exception happened during classloading. We ignore it here, since
1650     // it will be thrown again once the current activation has been
1651     // deoptimized and the interpreter re-executes the bytecode.
1652     CLEAR_PENDING_EXCEPTION;
1653     // Class loading called java code which may have caused a stack
1654     // overflow. If the exception was thrown right before the return
1655     // to the runtime the stack is no longer guarded. Reguard the
1656     // stack otherwise if we return to the uncommon trap blob and the
1657     // stack bang causes a stack overflow we crash.
1658     assert(THREAD->is_Java_thread(), "only a Java thread can be here");
1659     JavaThread* thread = (JavaThread*)THREAD;
1660     bool guard_pages_enabled = thread->stack_guards_enabled();
1661     if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1662     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1663   }
1664 }
1665
1666 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1667   HandleMark hm;
1668
1669   // uncommon_trap() is called at the beginning of the uncommon trap
1670   // handler. Note this fact before we start generating temporary frames
1671   // that can confuse an asynchronous stack walker. This counter is
1672   // decremented at the end of unpack_frames().
1673   thread->inc_in_deopt_handler();
1674
1675   // We need to update the map if we have biased locking.
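// (Editor's note -- an assumption-labeled aside about the flag chosen
//  below: passing 'true' as the RegisterMap's update_map argument makes the
//  sender() walk record where callee-saved registers were spilled, so oops
//  held in registers -- e.g. monitor owners examined by
//  revoke_biases_of_monitors() -- can be found and updated. With
//  !UseBiasedLocking and no JVMCI, the cheaper non-updating map suffices.)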
1676 #if INCLUDE_JVMCI
1677   // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid.
1678   RegisterMap reg_map(thread, true);
1679 #else
1680   RegisterMap reg_map(thread, UseBiasedLocking);
1681 #endif
1682   frame stub_frame = thread->last_frame();
1683   frame fr = stub_frame.sender(&reg_map);
1684   // Make sure the calling nmethod is not getting deoptimized and removed
1685   // before we are done with it.
1686   nmethodLocker nl(fr.pc());
1687
1688   // Log a message
1689   Events::log_deopt_message(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1690               trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1691
1692   {
1693     ResourceMark rm;
1694
1695     // Revoke biases of any monitors in the frame to ensure we can migrate them.
1696     revoke_biases_of_monitors(thread, fr, &reg_map);
1697
1698     DeoptReason reason = trap_request_reason(trap_request);
1699     DeoptAction action = trap_request_action(trap_request);
1700 #if INCLUDE_JVMCI
1701     int debug_id = trap_request_debug_id(trap_request);
1702 #endif
1703     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1704
1705     vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
1706     compiledVFrame* cvf = compiledVFrame::cast(vf);
1707
1708     CompiledMethod* nm = cvf->code();
1709
1710     ScopeDesc* trap_scope = cvf->scope();
1711
1712     if (TraceDeoptimization) {
1713       ttyLocker ttyl;
1714       tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1715 #if INCLUDE_JVMCI
1716           , debug_id
1717 #endif
1718           );
1719     }
1720
1721     methodHandle trap_method = trap_scope->method();
1722     int trap_bci = trap_scope->bci();
1723 #if INCLUDE_JVMCI
1724     jlong speculation = thread->pending_failed_speculation();
1725     if (nm->is_compiled_by_jvmci() && nm->is_nmethod()) { // Exclude AOTed methods
1726       nm->as_nmethod()->update_speculation(thread);
1727     } else {
1728       assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
1729     }
1730
1731     if (trap_bci == SynchronizationEntryBCI) {
1732       trap_bci = 0;
1733       thread->set_pending_monitorenter(true);
1734     }
1735
1736     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1737       thread->set_pending_transfer_to_interpreter(true);
1738     }
1739 #endif
1740
1741     Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
1742     // Record this event in the histogram.
1743     gather_statistics(reason, action, trap_bc);
1744
1745     // Ensure that we can record deopt. history:
1746     // Need MDO to record RTM code generation state.
1747     bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1748
1749     methodHandle profiled_method;
1750 #if INCLUDE_JVMCI
1751     if (nm->is_compiled_by_jvmci()) {
1752       profiled_method = nm->method();
1753     } else {
1754       profiled_method = trap_method;
1755     }
1756 #else
1757     profiled_method = trap_method;
1758 #endif
1759
1760     MethodData* trap_mdo =
1761       get_method_data(thread, profiled_method, create_if_missing);
1762
1763     // Log a message
1764     Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
1765                               trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
1766                               trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
1767
1768     // Print a bunch of diagnostics, if requested.
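    // (Editor's illustration of the output produced below; the attribute
    //  values are hypothetical. With LogCompilation, the head element built
    //  from format_trap_request() looks roughly like
    //
    //    <uncommon_trap thread='42' reason='class_check'
    //                   action='make_not_entrant' count='1' ...>
    //
    //  and with TraceDeoptimization a matching line naming the method, pc,
    //  reason and action is printed on the tty.)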
1769 if (TraceDeoptimization || LogCompilation) { 1770 ResourceMark rm; 1771 ttyLocker ttyl; 1772 char buf[100]; 1773 if (xtty != NULL) { 1774 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", 1775 os::current_thread_id(), 1776 format_trap_request(buf, sizeof(buf), trap_request)); 1777 #if INCLUDE_JVMCI 1778 if (speculation != 0) { 1779 xtty->print(" speculation='" JLONG_FORMAT "'", speculation); 1780 } 1781 #endif 1782 nm->log_identity(xtty); 1783 } 1784 Symbol* class_name = NULL; 1785 bool unresolved = false; 1786 if (unloaded_class_index >= 0) { 1787 constantPoolHandle constants (THREAD, trap_method->constants()); 1788 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { 1789 class_name = constants->klass_name_at(unloaded_class_index); 1790 unresolved = true; 1791 if (xtty != NULL) 1792 xtty->print(" unresolved='1'"); 1793 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { 1794 class_name = constants->symbol_at(unloaded_class_index); 1795 } 1796 if (xtty != NULL) 1797 xtty->name(class_name); 1798 } 1799 if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) { 1800 // Dump the relevant MDO state. 1801 // This is the deopt count for the current reason, any previous 1802 // reasons or recompiles seen at this point. 1803 int dcnt = trap_mdo->trap_count(reason); 1804 if (dcnt != 0) 1805 xtty->print(" count='%d'", dcnt); 1806 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); 1807 int dos = (pdata == NULL)? 0: pdata->trap_state(); 1808 if (dos != 0) { 1809 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); 1810 if (trap_state_is_recompiled(dos)) { 1811 int recnt2 = trap_mdo->overflow_recompile_count(); 1812 if (recnt2 != 0) 1813 xtty->print(" recompiles2='%d'", recnt2); 1814 } 1815 } 1816 } 1817 if (xtty != NULL) { 1818 xtty->stamp(); 1819 xtty->end_head(); 1820 } 1821 if (TraceDeoptimization) { // make noise on the tty 1822 tty->print("Uncommon trap occurred in"); 1823 nm->method()->print_short_name(tty); 1824 tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id()); 1825 #if INCLUDE_JVMCI 1826 if (nm->is_nmethod()) { 1827 const char* installed_code_name = nm->as_nmethod()->jvmci_name(); 1828 if (installed_code_name != NULL) { 1829 tty->print(" (JVMCI: installed code name=%s) ", installed_code_name); 1830 } 1831 } 1832 #endif 1833 tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), 1834 p2i(fr.pc()), 1835 os::current_thread_id(), 1836 trap_reason_name(reason), 1837 trap_action_name(action), 1838 unloaded_class_index 1839 #if INCLUDE_JVMCI 1840 , debug_id 1841 #endif 1842 ); 1843 if (class_name != NULL) { 1844 tty->print(unresolved ? " unresolved class: " : " symbol: "); 1845 class_name->print_symbol_on(tty); 1846 } 1847 tty->cr(); 1848 } 1849 if (xtty != NULL) { 1850 // Log the precise location of the trap. 1851 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 1852 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1853 xtty->method(sd->method()); 1854 xtty->end_elem(); 1855 if (sd->is_top()) break; 1856 } 1857 xtty->tail("uncommon_trap"); 1858 } 1859 } 1860 // (End diagnostic printout.) 1861 1862 // Load class if necessary 1863 if (unloaded_class_index >= 0) { 1864 constantPoolHandle constants(THREAD, trap_method->constants()); 1865 load_class_by_index(constants, unloaded_class_index); 1866 } 1867 1868 // Flush the nmethod if necessary and desirable. 
1869   //
1870   // We need to avoid situations where we are re-flushing the nmethod
1871   // because of a hot deoptimization site. Repeated flushes at the same
1872   // point need to be detected by the compiler and avoided. If the compiler
1873   // cannot avoid them (or has a bug and "refuses" to avoid them), this
1874   // module must take measures to avoid an infinite cycle of recompilation
1875   // and deoptimization. There are several such measures:
1876   //
1877   //   1. If a recompilation is ordered a second time at some site X
1878   //   and for the same reason R, the action is adjusted to 'reinterpret',
1879   //   to give the interpreter time to exercise the method more thoroughly.
1880   //   If this happens, the method's overflow_recompile_count is incremented.
1881   //
1882   //   2. If the compiler fails to reduce the deoptimization rate, then
1883   //   the method's overflow_recompile_count will begin to exceed the set
1884   //   limit PerBytecodeRecompilationCutoff. If this happens, the action
1885   //   is adjusted to 'make_not_compilable', and the method is abandoned
1886   //   to the interpreter. This is a performance hit for hot methods,
1887   //   but is better than a disastrous infinite cycle of recompilations.
1888   //   (Actually, only the method containing the site X is abandoned.)
1889   //
1890   //   3. In parallel with the previous measures, if the total number of
1891   //   recompilations of a method exceeds the much larger set limit
1892   //   PerMethodRecompilationCutoff, the method is abandoned.
1893   //   This should only happen if the method is very large and has
1894   //   many "lukewarm" deoptimizations. The code which enforces this
1895   //   limit is elsewhere (class nmethod, class Method).
1896   //
1897   // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1898   // to recompile at each bytecode independently of the per-BCI cutoff.
1899   //
1900   // The decision to update code is up to the compiler, and is encoded
1901   // in the Action_xxx code. If the compiler requests Action_none,
1902   // no trap state is changed, no compiled code is changed, and the
1903   // computation suffers along in the interpreter.
1904   //
1905   // The other action codes specify various tactics for decompilation
1906   // and recompilation. Action_maybe_recompile is the loosest, and
1907   // allows the compiled code to stay around until enough traps are seen,
1908   // and until the compiler gets around to recompiling the trapping method.
1909   //
1910   // The other actions cause immediate removal of the present code.
1911
1912   // Traps caused by injected profile shouldn't pollute trap counts.
1913   bool injected_profile_trap = trap_method->has_injected_profile() &&
1914                                (reason == Reason_intrinsic || reason == Reason_unreached);
1915
1916   bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1917   bool make_not_entrant = false;
1918   bool make_not_compilable = false;
1919   bool reprofile = false;
1920   switch (action) {
1921   case Action_none:
1922     // Keep the old code.
1923     update_trap_state = false;
1924     break;
1925   case Action_maybe_recompile:
1926     // We do not need to invalidate the present code, but we can
1927     // initiate another compilation.
1928     // Start the compiler without (necessarily) invalidating the nmethod.
1929     // The system will tolerate the old code, but new code should be
1930     // generated when possible.
1931     break;
1932   case Action_reinterpret:
1933     // Go back into the interpreter for a while, and then consider
1934     // recompiling from scratch.
1935     make_not_entrant = true;
1936     // Reset invocation counter for outermost method.
1937     // This will allow the interpreter to exercise the bytecodes
1938     // for a while before recompiling.
1939     // By contrast, Action_make_not_entrant is immediate.
1940     //
1941     // Note that the compiler will track null_check, null_assert,
1942     // range_check, and class_check events and log them as if they
1943     // had been traps taken from compiled code. This will update
1944     // the MDO trap history so that the next compilation will
1945     // properly detect hot trap sites.
1946     reprofile = true;
1947     break;
1948   case Action_make_not_entrant:
1949     // Request immediate recompilation, and get rid of the old code.
1950     // Make it not entrant, so that the next call will be recompiled.
1951     // Unloaded classes have been loaded by now, so we recompile before
1952     // the next call. The same holds for uninitialized classes. The
1953     // interpreter will link the missing class, if any.
1954     make_not_entrant = true;
1955     break;
1956   case Action_make_not_compilable:
1957     // Give up on compiling this method at all.
1958     make_not_entrant = true;
1959     make_not_compilable = true;
1960     break;
1961   default:
1962     ShouldNotReachHere();
1963   }
1964
1965   // Setting +ProfileTraps fixes the following, on all platforms:
1966   // 4852688: ProfileInterpreter is off by default for ia64. The result is
1967   // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1968   // recompile relies on a MethodData* to record heroic opt failures.
1969
1970   // Whether the interpreter is producing MDO data or not, we also need
1971   // to use the MDO to detect hot deoptimization points and control
1972   // aggressive optimization.
1973   bool inc_recompile_count = false;
1974   ProfileData* pdata = NULL;
1975   if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
1976     assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
1977     uint this_trap_count = 0;
1978     bool maybe_prior_trap = false;
1979     bool maybe_prior_recompile = false;
1980     pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
1981 #if INCLUDE_JVMCI
1982                                      nm->is_compiled_by_jvmci() && nm->is_osr_method(),
1983 #endif
1984                                      nm->method(),
1985                                      //outputs:
1986                                      this_trap_count,
1987                                      maybe_prior_trap,
1988                                      maybe_prior_recompile);
1989     // Because the interpreter also counts null, div0, range, and class
1990     // checks, these traps from compiled code are double-counted.
1991     // This is harmless; it just means that the PerXTrapLimit values
1992     // are in effect a little smaller than they look.
1993
1994     DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1995     if (per_bc_reason != Reason_none) {
1996       // Now take action based on the partially known per-BCI history.
1997       if (maybe_prior_trap
1998           && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1999         // If there are too many traps at this BCI, force a recompile.
2000         // This will allow the compiler to see the limit overflow, and
2001         // take corrective action, if possible. The compiler generally
2002         // does not use the exact PerBytecodeTrapLimit value, but instead
2003         // changes its tactics if it sees any traps at all. This provides
2004         // a little hysteresis, delaying a recompile until a trap happens
2005         // several times.
2006         //
2007         // Actually, since there is only one bit of counter per BCI,
2008         // the possible per-BCI counts are {0,1,(per-method count)}.
2009 // This produces accurate results if in fact there is only 2010 // one hot trap site, but begins to get fuzzy if there are 2011 // many sites. For example, if there are ten sites each 2012 // trapping two or more times, they each get the blame for 2013 // all of their traps. 2014 make_not_entrant = true; 2015 } 2016 2017 // Detect repeated recompilation at the same BCI, and enforce a limit. 2018 if (make_not_entrant && maybe_prior_recompile) { 2019 // More than one recompile at this point. 2020 inc_recompile_count = maybe_prior_trap; 2021 } 2022 } else { 2023 // For reasons which are not recorded per-bytecode, we simply 2024 // force recompiles unconditionally. 2025 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) 2026 make_not_entrant = true; 2027 } 2028 2029 // Go back to the compiler if there are too many traps in this method. 2030 if (this_trap_count >= per_method_trap_limit(reason)) { 2031 // If there are too many traps in this method, force a recompile. 2032 // This will allow the compiler to see the limit overflow, and 2033 // take corrective action, if possible. 2034 // (This condition is an unlikely backstop only, because the 2035 // PerBytecodeTrapLimit is more likely to take effect first, 2036 // if it is applicable.) 2037 make_not_entrant = true; 2038 } 2039 2040 // Here's more hysteresis: If there has been a recompile at 2041 // this trap point already, run the method in the interpreter 2042 // for a while to exercise it more thoroughly. 2043 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { 2044 reprofile = true; 2045 } 2046 } 2047 2048 // Take requested actions on the method: 2049 2050 // Recompile 2051 if (make_not_entrant) { 2052 if (!nm->make_not_entrant()) { 2053 return; // the call did not change nmethod's state 2054 } 2055 2056 if (pdata != NULL) { 2057 // Record the recompilation event, if any. 2058 int tstate0 = pdata->trap_state(); 2059 int tstate1 = trap_state_set_recompiled(tstate0, true); 2060 if (tstate1 != tstate0) 2061 pdata->set_trap_state(tstate1); 2062 } 2063 2064 #if INCLUDE_RTM_OPT 2065 // Restart collecting RTM locking abort statistic if the method 2066 // is recompiled for a reason other than RTM state change. 2067 // Assume that in new recompiled code the statistic could be different, 2068 // for example, due to different inlining. 2069 if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && 2070 UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { 2071 trap_mdo->atomic_set_rtm_state(ProfileRTM); 2072 } 2073 #endif 2074 // For code aging we count traps separately here, using make_not_entrant() 2075 // as a guard against simultaneous deopts in multiple threads. 2076 if (reason == Reason_tenured && trap_mdo != NULL) { 2077 trap_mdo->inc_tenure_traps(); 2078 } 2079 } 2080 2081 if (inc_recompile_count) { 2082 trap_mdo->inc_overflow_recompile_count(); 2083 if ((uint)trap_mdo->overflow_recompile_count() > 2084 (uint)PerBytecodeRecompilationCutoff) { 2085 // Give up on the method containing the bad BCI. 2086 if (trap_method() == nm->method()) { 2087 make_not_compilable = true; 2088 } else { 2089 trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization); 2090 // But give grace to the enclosing nm->method(). 
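          // (Editor's note, illustrative: the net effect of this cutoff is
          //  that a single bytecode which keeps provoking recompiles
          //  eventually costs only its own method its
          //  CompLevel_full_optimization rights; an enclosing method that
          //  merely inlined the culprit keeps its normal compilation
          //  rights.)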
2091       }
2092     }
2093   }
2094
2095   // Reprofile
2096   if (reprofile) {
2097     CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
2098   }
2099
2100   // Give up compiling
2101   if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
2102     assert(make_not_entrant, "consistent");
2103     nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization);
2104   }
2105
2106   } // Free marked resources
2107
2108 }
2109 JRT_END
2110
2111 ProfileData*
2112 Deoptimization::query_update_method_data(MethodData* trap_mdo,
2113                                          int trap_bci,
2114                                          Deoptimization::DeoptReason reason,
2115                                          bool update_total_trap_count,
2116 #if INCLUDE_JVMCI
2117                                          bool is_osr,
2118 #endif
2119                                          Method* compiled_method,
2120                                          //outputs:
2121                                          uint& ret_this_trap_count,
2122                                          bool& ret_maybe_prior_trap,
2123                                          bool& ret_maybe_prior_recompile) {
2124   bool maybe_prior_trap = false;
2125   bool maybe_prior_recompile = false;
2126   uint this_trap_count = 0;
2127   if (update_total_trap_count) {
2128     uint idx = reason;
2129 #if INCLUDE_JVMCI
2130     if (is_osr) {
2131       idx += Reason_LIMIT;
2132     }
2133 #endif
2134     uint prior_trap_count = trap_mdo->trap_count(idx);
2135     this_trap_count = trap_mdo->inc_trap_count(idx);
2136
2137     // If the runtime cannot find a place to store trap history,
2138     // it is estimated based on the general condition of the method.
2139     // If the method has ever been recompiled, or has ever incurred
2140     // a trap with the present reason, then this BCI is assumed
2141     // (pessimistically) to be the culprit.
2142     maybe_prior_trap = (prior_trap_count != 0);
2143     maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
2144   }
2145   ProfileData* pdata = NULL;
2146
2147
2148   // For reasons which are recorded per bytecode, we check per-BCI data.
2149   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2150   assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
2151   if (per_bc_reason != Reason_none) {
2152     // Find the profile data for this BCI. If there isn't one,
2153     // try to allocate one from the MDO's set of spares.
2154     // This will let us detect a repeated trap at this point.
2155     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
2156
2157     if (pdata != NULL) {
2158       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2159         if (LogCompilation && xtty != NULL) {
2160           ttyLocker ttyl;
2161           // No more room for speculative traps in this MDO.
2162           xtty->elem("speculative_traps_oom");
2163         }
2164       }
2165       // Query the trap state of this profile datum.
2166       int tstate0 = pdata->trap_state();
2167       if (!trap_state_has_reason(tstate0, per_bc_reason))
2168         maybe_prior_trap = false;
2169       if (!trap_state_is_recompiled(tstate0))
2170         maybe_prior_recompile = false;
2171
2172       // Update the trap state of this profile datum.
2173       int tstate1 = tstate0;
2174       // Record the reason.
2175       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2176       // Store the updated state on the MDO, for next time.
2177       if (tstate1 != tstate0)
2178         pdata->set_trap_state(tstate1);
2179     } else {
2180       if (LogCompilation && xtty != NULL) {
2181         ttyLocker ttyl;
2182         // Missing MDP? Leave a small complaint in the log.
2183         xtty->elem("missing_mdp bci='%d'", trap_bci);
2184       }
2185     }
2186   }
2187
2188   // Return results:
2189   ret_this_trap_count = this_trap_count;
2190   ret_maybe_prior_trap = maybe_prior_trap;
2191   ret_maybe_prior_recompile = maybe_prior_recompile;
2192   return pdata;
2193 }
2194
2195 void
2196 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2197   ResourceMark rm;
2198   // Ignored outputs:
2199   uint ignore_this_trap_count;
2200   bool ignore_maybe_prior_trap;
2201   bool ignore_maybe_prior_recompile;
2202   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2203   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently; hence, do not adjust the total counts.
2204   bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2205   query_update_method_data(trap_mdo, trap_bci,
2206                            (DeoptReason)reason,
2207                            update_total_counts,
2208 #if INCLUDE_JVMCI
2209                            false,
2210 #endif
2211                            NULL,
2212                            ignore_this_trap_count,
2213                            ignore_maybe_prior_trap,
2214                            ignore_maybe_prior_recompile);
2215 }
2216
2217 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
2218   if (TraceDeoptimization) {
2219     tty->print("Uncommon trap ");
2220   }
2221   // Still in Java code; no safepoints have been taken yet.
2222   {
2223     // This enters the VM and may safepoint.
2224     uncommon_trap_inner(thread, trap_request);
2225   }
2226   return fetch_unroll_info_helper(thread, exec_mode);
2227 }
2228
2229 // Local derived constants.
2230 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2231 const int DS_REASON_MASK   = ((uint)DataLayout::trap_mask) >> 1;
2232 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
2233
2234 //---------------------------trap_state_reason---------------------------------
2235 Deoptimization::DeoptReason
2236 Deoptimization::trap_state_reason(int trap_state) {
2237   // This assert provides the link between the width of DataLayout::trap_bits
2238   // and the encoding of "recorded" reasons. It ensures there are enough
2239   // bits to store all needed reasons in the per-BCI MDO profile.
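  // (Worked example -- editor's sketch with assumed mask values: if
  //  DS_REASON_MASK == 0x7f and DS_RECOMPILE_BIT == 0x80, then
  //
  //    int s = trap_state_add_reason(0, Reason_null_check);
  //    s = trap_state_set_recompiled(s, true);
  //
  //  yields trap_state_reason(s) == Reason_null_check and
  //  trap_state_is_recompiled(s) == true, while a state whose reason bits
  //  equal DS_REASON_MASK decodes as Reason_many, the lattice bottom.)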
2240 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2241 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2242 trap_state -= recompile_bit; 2243 if (trap_state == DS_REASON_MASK) { 2244 return Reason_many; 2245 } else { 2246 assert((int)Reason_none == 0, "state=0 => Reason_none"); 2247 return (DeoptReason)trap_state; 2248 } 2249 } 2250 //-------------------------trap_state_has_reason------------------------------- 2251 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2252 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); 2253 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2254 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2255 trap_state -= recompile_bit; 2256 if (trap_state == DS_REASON_MASK) { 2257 return -1; // true, unspecifically (bottom of state lattice) 2258 } else if (trap_state == reason) { 2259 return 1; // true, definitely 2260 } else if (trap_state == 0) { 2261 return 0; // false, definitely (top of state lattice) 2262 } else { 2263 return 0; // false, definitely 2264 } 2265 } 2266 //-------------------------trap_state_add_reason------------------------------- 2267 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { 2268 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); 2269 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2270 trap_state -= recompile_bit; 2271 if (trap_state == DS_REASON_MASK) { 2272 return trap_state + recompile_bit; // already at state lattice bottom 2273 } else if (trap_state == reason) { 2274 return trap_state + recompile_bit; // the condition is already true 2275 } else if (trap_state == 0) { 2276 return reason + recompile_bit; // no condition has yet been true 2277 } else { 2278 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom 2279 } 2280 } 2281 //-----------------------trap_state_is_recompiled------------------------------ 2282 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2283 return (trap_state & DS_RECOMPILE_BIT) != 0; 2284 } 2285 //-----------------------trap_state_set_recompiled----------------------------- 2286 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { 2287 if (z) return trap_state | DS_RECOMPILE_BIT; 2288 else return trap_state & ~DS_RECOMPILE_BIT; 2289 } 2290 //---------------------------format_trap_state--------------------------------- 2291 // This is used for debugging and diagnostics, including LogFile output. 2292 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2293 int trap_state) { 2294 assert(buflen > 0, "sanity"); 2295 DeoptReason reason = trap_state_reason(trap_state); 2296 bool recomp_flag = trap_state_is_recompiled(trap_state); 2297 // Re-encode the state from its decoded components. 2298 int decoded_state = 0; 2299 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many) 2300 decoded_state = trap_state_add_reason(decoded_state, reason); 2301 if (recomp_flag) 2302 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag); 2303 // If the state re-encodes properly, format it symbolically. 2304 // Because this routine is used for debugging and diagnostics, 2305 // be robust even if the state is a strange value. 2306 size_t len; 2307 if (decoded_state != trap_state) { 2308 // Random buggy state that doesn't decode?? 
2309 len = jio_snprintf(buf, buflen, "#%d", trap_state); 2310 } else { 2311 len = jio_snprintf(buf, buflen, "%s%s", 2312 trap_reason_name(reason), 2313 recomp_flag ? " recompiled" : ""); 2314 } 2315 return buf; 2316 } 2317 2318 2319 //--------------------------------statics-------------------------------------- 2320 const char* Deoptimization::_trap_reason_name[] = { 2321 // Note: Keep this in sync. with enum DeoptReason. 2322 "none", 2323 "null_check", 2324 "null_assert" JVMCI_ONLY("_or_unreached0"), 2325 "range_check", 2326 "class_check", 2327 "array_check", 2328 "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"), 2329 "bimorphic" JVMCI_ONLY("_or_optimized_type_check"), 2330 "profile_predicate", 2331 "unloaded", 2332 "uninitialized", 2333 "initialized", 2334 "unreached", 2335 "unhandled", 2336 "constraint", 2337 "div0_check", 2338 "age", 2339 "predicate", 2340 "loop_limit_check", 2341 "speculate_class_check", 2342 "speculate_null_check", 2343 "speculate_null_assert", 2344 "rtm_state_change", 2345 "unstable_if", 2346 "unstable_fused_if", 2347 #if INCLUDE_JVMCI 2348 "aliasing", 2349 "transfer_to_interpreter", 2350 "not_compiled_exception_handler", 2351 "unresolved", 2352 "jsr_mismatch", 2353 #endif 2354 "tenured" 2355 }; 2356 const char* Deoptimization::_trap_action_name[] = { 2357 // Note: Keep this in sync. with enum DeoptAction. 2358 "none", 2359 "maybe_recompile", 2360 "reinterpret", 2361 "make_not_entrant", 2362 "make_not_compilable" 2363 }; 2364 2365 const char* Deoptimization::trap_reason_name(int reason) { 2366 // Check that every reason has a name 2367 STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT); 2368 2369 if (reason == Reason_many) return "many"; 2370 if ((uint)reason < Reason_LIMIT) 2371 return _trap_reason_name[reason]; 2372 static char buf[20]; 2373 sprintf(buf, "reason%d", reason); 2374 return buf; 2375 } 2376 const char* Deoptimization::trap_action_name(int action) { 2377 // Check that every action has a name 2378 STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT); 2379 2380 if ((uint)action < Action_LIMIT) 2381 return _trap_action_name[action]; 2382 static char buf[20]; 2383 sprintf(buf, "action%d", action); 2384 return buf; 2385 } 2386 2387 // This is used for debugging and diagnostics, including LogFile output. 
2388 const char* Deoptimization::format_trap_request(char* buf, size_t buflen, 2389 int trap_request) { 2390 jint unloaded_class_index = trap_request_index(trap_request); 2391 const char* reason = trap_reason_name(trap_request_reason(trap_request)); 2392 const char* action = trap_action_name(trap_request_action(trap_request)); 2393 #if INCLUDE_JVMCI 2394 int debug_id = trap_request_debug_id(trap_request); 2395 #endif 2396 size_t len; 2397 if (unloaded_class_index < 0) { 2398 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"), 2399 reason, action 2400 #if INCLUDE_JVMCI 2401 ,debug_id 2402 #endif 2403 ); 2404 } else { 2405 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"), 2406 reason, action, unloaded_class_index 2407 #if INCLUDE_JVMCI 2408 ,debug_id 2409 #endif 2410 ); 2411 } 2412 return buf; 2413 } 2414 2415 juint Deoptimization::_deoptimization_hist 2416 [Deoptimization::Reason_LIMIT] 2417 [1 + Deoptimization::Action_LIMIT] 2418 [Deoptimization::BC_CASE_LIMIT] 2419 = {0}; 2420 2421 enum { 2422 LSB_BITS = 8, 2423 LSB_MASK = right_n_bits(LSB_BITS) 2424 }; 2425 2426 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, 2427 Bytecodes::Code bc) { 2428 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2429 assert(action >= 0 && action < Action_LIMIT, "oob"); 2430 _deoptimization_hist[Reason_none][0][0] += 1; // total 2431 _deoptimization_hist[reason][0][0] += 1; // per-reason total 2432 juint* cases = _deoptimization_hist[reason][1+action]; 2433 juint* bc_counter_addr = NULL; 2434 juint bc_counter = 0; 2435 // Look for an unused counter, or an exact match to this BC. 2436 if (bc != Bytecodes::_illegal) { 2437 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { 2438 juint* counter_addr = &cases[bc_case]; 2439 juint counter = *counter_addr; 2440 if ((counter == 0 && bc_counter_addr == NULL) 2441 || (Bytecodes::Code)(counter & LSB_MASK) == bc) { 2442 // this counter is either free or is already devoted to this BC 2443 bc_counter_addr = counter_addr; 2444 bc_counter = counter | bc; 2445 } 2446 } 2447 } 2448 if (bc_counter_addr == NULL) { 2449 // Overflow, or no given bytecode. 2450 bc_counter_addr = &cases[BC_CASE_LIMIT-1]; 2451 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB 2452 } 2453 *bc_counter_addr = bc_counter + (1 << LSB_BITS); 2454 } 2455 2456 jint Deoptimization::total_deoptimization_count() { 2457 return _deoptimization_hist[Reason_none][0][0]; 2458 } 2459 2460 void Deoptimization::print_statistics() { 2461 juint total = total_deoptimization_count(); 2462 juint account = total; 2463 if (total != 0) { 2464 ttyLocker ttyl; 2465 if (xtty != NULL) xtty->head("statistics type='deoptimization'"); 2466 tty->print_cr("Deoptimization traps recorded:"); 2467 #define PRINT_STAT_LINE(name, r) \ 2468 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); 2469 PRINT_STAT_LINE("total", total); 2470 // For each non-zero entry in the histogram, print the reason, 2471 // the action, and (if specifically known) the type of bytecode. 
2472     for (int reason = 0; reason < Reason_LIMIT; reason++) {
2473       for (int action = 0; action < Action_LIMIT; action++) {
2474         juint* cases = _deoptimization_hist[reason][1+action];
2475         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2476           juint counter = cases[bc_case];
2477           if (counter != 0) {
2478             char name[1*K];
2479             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2480             if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
2481               bc = Bytecodes::_illegal;
2482             sprintf(name, "%s/%s/%s",
2483                     trap_reason_name(reason),
2484                     trap_action_name(action),
2485                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2486             juint r = counter >> LSB_BITS;
2487             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2488             account -= r;
2489           }
2490         }
2491       }
2492     }
2493     if (account != 0) {
2494       PRINT_STAT_LINE("unaccounted", account);
2495     }
2496 #undef PRINT_STAT_LINE
2497     if (xtty != NULL) xtty->tail("statistics");
2498   }
2499 }
2500 #else // COMPILER2_OR_JVMCI
2501
2502
2503 // Stubs for a C1-only system.
2504 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2505   return false;
2506 }
2507
2508 const char* Deoptimization::trap_reason_name(int reason) {
2509   return "unknown";
2510 }
2511
2512 void Deoptimization::print_statistics() {
2513   // no output
2514 }
2515
2516 void
2517 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2518   // no update
2519 }
2520
2521 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2522   return 0;
2523 }
2524
2525 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2526                                        Bytecodes::Code bc) {
2527   // no update
2528 }
2529
2530 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2531                                               int trap_state) {
2532   jio_snprintf(buf, buflen, "#%d", trap_state);
2533   return buf;
2534 }
2535
2536 #endif // COMPILER2_OR_JVMCI
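
// (Editor's note, illustrative: each _deoptimization_hist cell above packs
//  a bytecode into the low LSB_BITS and an event count above them, so a
//  cell that has recorded three traps at a getfield holds
//
//    (3 << LSB_BITS) | Bytecodes::_getfield
//
//  which print_statistics() decodes back into one "reason/action/bytecode"
//  line. The C1-only stubs above keep the same interface available when
//  COMPILER2_OR_JVMCI is not defined.)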