1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "compiler/abstractCompiler.hpp" 27 #include "compiler/disassembler.hpp" 28 #include "gc_interface/collectedHeap.inline.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/oopMapCache.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "memory/universe.inline.hpp" 33 #include "oops/markOop.hpp" 34 #include "oops/methodData.hpp" 35 #include "oops/method.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "oops/oop.inline2.hpp" 38 #include "prims/methodHandles.hpp" 39 #include "runtime/frame.inline.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/javaCalls.hpp" 42 #include "runtime/monitorChunk.hpp" 43 #include "runtime/sharedRuntime.hpp" 44 #include "runtime/signature.hpp" 45 #include "runtime/stubCodeGenerator.hpp" 46 #include "runtime/stubRoutines.hpp" 47 #include "runtime/thread.inline.hpp" 48 #include "utilities/decoder.hpp" 49 50 #ifdef TARGET_ARCH_x86 51 # include "nativeInst_x86.hpp" 52 #endif 53 #ifdef TARGET_ARCH_sparc 54 # include "nativeInst_sparc.hpp" 55 #endif 56 #ifdef TARGET_ARCH_zero 57 # include "nativeInst_zero.hpp" 58 #endif 59 #ifdef TARGET_ARCH_arm 60 # include "nativeInst_arm.hpp" 61 #endif 62 #ifdef TARGET_ARCH_ppc 63 # include "nativeInst_ppc.hpp" 64 #endif 65 66 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 67 68 RegisterMap::RegisterMap(JavaThread *thread, bool update_map) { 69 _thread = thread; 70 _update_map = update_map; 71 clear(); 72 debug_only(_update_for_id = NULL;) 73 #ifndef PRODUCT 74 for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL; 75 #endif /* PRODUCT */ 76 } 77 78 RegisterMap::RegisterMap(const RegisterMap* map) { 79 assert(map != this, "bad initialization parameter"); 80 assert(map != NULL, "RegisterMap must be present"); 81 _thread = map->thread(); 82 _update_map = map->update_map(); 83 _include_argument_oops = map->include_argument_oops(); 84 debug_only(_update_for_id = map->_update_for_id;) 85 pd_initialize_from(map); 86 if (update_map()) { 87 for(int i = 0; i < location_valid_size; i++) { 88 LocationValidType bits = !update_map() ? 
0 : map->_location_valid[i];
      _location_valid[i] = bits;
      // for whichever bits are set, pull in the corresponding map->_location
      int j = i*location_valid_type_size;
      while (bits != 0) {
        if ((bits & 1) != 0) {
          assert(0 <= j && j < reg_count, "range check");
          _location[j] = map->_location[j];
        }
        bits >>= 1;
        j += 1;
      }
    }
  }
}

void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (_update_map) {
    for(int i = 0; i < location_valid_size; i++) {
      _location_valid[i] = 0;
    }
    pd_clear();
  } else {
    pd_initialize();
  }
}

#ifndef PRODUCT

void RegisterMap::print_on(outputStream* st) const {
  st->print_cr("Register map");
  for(int i = 0; i < reg_count; i++) {

    VMReg r = VMRegImpl::as_VMReg(i);
    intptr_t* src = (intptr_t*) location(r);
    if (src != NULL) {

      r->print_on(st);
      st->print(" [" INTPTR_FORMAT "] = ", src);
      if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
        st->print_cr("<misaligned>");
      } else {
        st->print_cr(INTPTR_FORMAT, *src);
      }
    }
  }
}

void RegisterMap::print() const {
  print_on(tty);
}

#endif
// This returns the pc you would see in the debugger, not the idealized value
// in the frame object. It undoes the magic conversion that happens for
// deoptimized frames and, in addition, makes the value the one the hardware
// would want to see in the native frame. The only user (at this point) is
// deoptimization; likely no one else should ever use it.

address frame::raw_pc() const {
  if (is_deoptimized_frame()) {
    nmethod* nm = cb()->as_nmethod_or_null();
    if (nm->is_method_handle_return(pc()))
      return nm->deopt_mh_handler_begin() - pc_return_offset;
    else
      return nm->deopt_handler_begin() - pc_return_offset;
  } else {
    return (pc() - pc_return_offset);
  }
}

// Change the pc in a frame object. This does not change the actual pc in the
// actual frame. To do that, use patch_pc.
//
void frame::set_pc(address newpc) {
#ifdef ASSERT
  if (_cb != NULL && _cb->is_nmethod()) {
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
  }
#endif // ASSERT

  // Unsafe to use the is_deoptimized tester after changing pc
  _deopt_state = unknown;
  _pc = newpc;
  _cb = CodeCache::find_blob_unsafe(_pc);

}

// type testers
bool frame::is_ignored_frame() const {
  return false;  // FIXME: some LambdaForm frames should be ignored
}
bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  return _deopt_state == is_deoptimized;
}

bool frame::is_native_frame() const {
  return (_cb != NULL &&
          _cb->is_nmethod() &&
          ((nmethod*)_cb)->is_native_method());
}

bool frame::is_java_frame() const {
  if (is_interpreted_frame()) return true;
  if (is_compiled_frame())    return true;
  return false;
}


bool frame::is_compiled_frame() const {
  if (_cb != NULL &&
      _cb->is_nmethod() &&
      ((nmethod*)_cb)->is_java_method()) {
    return true;
  }
  return false;
}


bool frame::is_runtime_frame() const {
  return (_cb != NULL && _cb->is_runtime_stub());
}

bool frame::is_safepoint_blob_frame() const {
  return (_cb != NULL && _cb->is_safepoint_stub());
}

// testers

bool frame::is_first_java_frame() const {
  RegisterMap map(JavaThread::current(), false); // No update
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
  return s.is_first_frame();
}


bool frame::entry_frame_is_first() const {
  return entry_frame_call_wrapper()->is_first_frame();
}

JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const {
  JavaCallWrapper** jcw = entry_frame_call_wrapper_addr();
  address addr = (address) jcw;

  // addr must be within the usable part of the stack
  if (thread->is_in_usable_stack(addr)) {
    return *jcw;
  }

  return NULL;
}

bool frame::should_be_deoptimized() const {
  if (_deopt_state == is_deoptimized ||
      !is_compiled_frame() ) return false;
  assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
  nmethod* nm = (nmethod *)_cb;
  if (TraceDependencies) {
    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
    nm->print_value_on(tty);
    tty->cr();
  }

  if( !nm->is_marked_for_deoptimization() )
    return false;

  // If at the return point, then the frame has already been popped, and
  // only the return needs to be executed. Don't deoptimize here.
  return !nm->is_at_poll_return(pc());
}

bool frame::can_be_deoptimized() const {
  if (!is_compiled_frame()) return false;
  nmethod* nm = (nmethod*)_cb;

  if( !nm->can_be_deoptimized() )
    return false;

  return !nm->is_at_poll_return(pc());
}

void frame::deoptimize(JavaThread* thread) {
  // Schedule deoptimization of an nmethod activation with this frame.
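  //
  // Illustrative sketch (not part of the original sources): a typical caller
  // locates the target activation by walking sender frames and comparing
  // frame ids, then requests deoptimization, roughly as follows ('target_id'
  // and 'thread' are placeholders):
  //
  //   RegisterMap reg_map(thread, false);
  //   frame fr = thread->last_frame();
  //   while (fr.id() != target_id) fr = fr.sender(&reg_map);
  //   if (fr.can_be_deoptimized()) fr.deoptimize(thread);
  //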
  assert(_cb != NULL && _cb->is_nmethod(), "must be");
  nmethod* nm = (nmethod*)_cb;

  // This is a fix for register window patching race
  if (NeedsDeoptSuspend && Thread::current() != thread) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "patching other threads for deopt may only occur at a safepoint");

    // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
    // we could see the frame again and ask for it to be deoptimized since
    // it might move for a long time. That is harmless and we just ignore it.
    if (id() == thread->must_deopt_id()) {
      assert(thread->is_deopt_suspend(), "lost suspension");
      return;
    }

    // We are at a safepoint so the target thread can only be
    // in 4 states:
    //  blocked - no problem
    //  blocked_trans - no problem (i.e. could have woken up from blocked
    //                  during a safepoint).
    //  native - register window pc patching race
    //  native_trans - momentary state
    //
    // We could just wait out a thread in native_trans to block.
    // Then we'd have all the issues that the safepoint code has as to
    // whether to spin or block. It isn't worth it. Just treat it like
    // native and be done with it.
    //
    // Examine the state of the thread at the start of safepoint since
    // threads that were in native at the start of the safepoint could
    // come to a halt during the safepoint, changing the current value
    // of the safepoint_state.
    JavaThreadState state = thread->safepoint_state()->orig_thread_state();
    if (state == _thread_in_native || state == _thread_in_native_trans) {
      // Since we are at a safepoint the target thread will stop itself
      // before it can return to java as long as we remain at the safepoint.
      // Therefore we can put an additional request for the thread to stop
      // no matter what (like a suspend). This will cause the thread
      // to notice it needs to do the deopt on its own once it leaves native.
      //
      // The only reason we must do this is because on machines with register
      // windows we have a race with patching the return address and the
      // window coming live as the thread returns to the Java code (but still
      // in native mode) and then blocks. It is only this topmost frame
      // that is at risk. So in truth we could add an additional check to
      // see if this frame is one that is at risk.
      RegisterMap map(thread, false);
      frame at_risk = thread->last_frame().sender(&map);
      if (id() == at_risk.id()) {
        thread->set_must_deopt_id(id());
        thread->set_deopt_suspend();
        return;
      }
    }
  } // NeedsDeoptSuspend


  // If the call site is a MethodHandle call site use the MH deopt
  // handler.
  address deopt = nm->is_method_handle_return(pc()) ?
335 nm->deopt_mh_handler_begin() : 336 nm->deopt_handler_begin(); 337 338 // Save the original pc before we patch in the new one 339 nm->set_original_pc(this, pc()); 340 patch_pc(thread, deopt); 341 342 #ifdef ASSERT 343 { 344 RegisterMap map(thread, false); 345 frame check = thread->last_frame(); 346 while (id() != check.id()) { 347 check = check.sender(&map); 348 } 349 assert(check.is_deoptimized_frame(), "missed deopt"); 350 } 351 #endif // ASSERT 352 } 353 354 frame frame::java_sender() const { 355 RegisterMap map(JavaThread::current(), false); 356 frame s; 357 for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ; 358 guarantee(s.is_java_frame(), "tried to get caller of first java frame"); 359 return s; 360 } 361 362 frame frame::real_sender(RegisterMap* map) const { 363 frame result = sender(map); 364 while (result.is_runtime_frame() || 365 result.is_ignored_frame()) { 366 result = result.sender(map); 367 } 368 return result; 369 } 370 371 // Note: called by profiler - NOT for current thread 372 frame frame::profile_find_Java_sender_frame(JavaThread *thread) { 373 // If we don't recognize this frame, walk back up the stack until we do 374 RegisterMap map(thread, false); 375 frame first_java_frame = frame(); 376 377 // Find the first Java frame on the stack starting with input frame 378 if (is_java_frame()) { 379 // top frame is compiled frame or deoptimized frame 380 first_java_frame = *this; 381 } else if (safe_for_sender(thread)) { 382 for (frame sender_frame = sender(&map); 383 sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame(); 384 sender_frame = sender_frame.sender(&map)) { 385 if (sender_frame.is_java_frame()) { 386 first_java_frame = sender_frame; 387 break; 388 } 389 } 390 } 391 return first_java_frame; 392 } 393 394 // Interpreter frames 395 396 397 void frame::interpreter_frame_set_locals(intptr_t* locs) { 398 assert(is_interpreted_frame(), "Not an interpreted frame"); 399 *interpreter_frame_locals_addr() = locs; 400 } 401 402 Method* frame::interpreter_frame_method() const { 403 assert(is_interpreted_frame(), "interpreted frame expected"); 404 Method* m = *interpreter_frame_method_addr(); 405 assert(m->is_method(), "not a Method*"); 406 return m; 407 } 408 409 void frame::interpreter_frame_set_method(Method* method) { 410 assert(is_interpreted_frame(), "interpreted frame expected"); 411 *interpreter_frame_method_addr() = method; 412 } 413 414 void frame::interpreter_frame_set_bcx(intptr_t bcx) { 415 assert(is_interpreted_frame(), "Not an interpreted frame"); 416 if (ProfileInterpreter) { 417 bool formerly_bci = is_bci(interpreter_frame_bcx()); 418 bool is_now_bci = is_bci(bcx); 419 *interpreter_frame_bcx_addr() = bcx; 420 421 intptr_t mdx = interpreter_frame_mdx(); 422 423 if (mdx != 0) { 424 if (formerly_bci) { 425 if (!is_now_bci) { 426 // The bcx was just converted from bci to bcp. 427 // Convert the mdx in parallel. 428 MethodData* mdo = interpreter_frame_method()->method_data(); 429 assert(mdo != NULL, ""); 430 int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. 431 address mdp = mdo->di_to_dp(mdi); 432 interpreter_frame_set_mdx((intptr_t)mdp); 433 } 434 } else { 435 if (is_now_bci) { 436 // The bcx was just converted from bcp to bci. 437 // Convert the mdx in parallel. 438 MethodData* mdo = interpreter_frame_method()->method_data(); 439 assert(mdo != NULL, ""); 440 int mdi = mdo->dp_to_di((address)mdx); 441 interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. 
442 } 443 } 444 } 445 } else { 446 *interpreter_frame_bcx_addr() = bcx; 447 } 448 } 449 450 jint frame::interpreter_frame_bci() const { 451 assert(is_interpreted_frame(), "interpreted frame expected"); 452 intptr_t bcx = interpreter_frame_bcx(); 453 return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx); 454 } 455 456 void frame::interpreter_frame_set_bci(jint bci) { 457 assert(is_interpreted_frame(), "interpreted frame expected"); 458 assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC"); 459 interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci)); 460 } 461 462 address frame::interpreter_frame_bcp() const { 463 assert(is_interpreted_frame(), "interpreted frame expected"); 464 intptr_t bcx = interpreter_frame_bcx(); 465 return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx; 466 } 467 468 void frame::interpreter_frame_set_bcp(address bcp) { 469 assert(is_interpreted_frame(), "interpreted frame expected"); 470 assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC"); 471 interpreter_frame_set_bcx((intptr_t)bcp); 472 } 473 474 void frame::interpreter_frame_set_mdx(intptr_t mdx) { 475 assert(is_interpreted_frame(), "Not an interpreted frame"); 476 assert(ProfileInterpreter, "must be profiling interpreter"); 477 *interpreter_frame_mdx_addr() = mdx; 478 } 479 480 address frame::interpreter_frame_mdp() const { 481 assert(ProfileInterpreter, "must be profiling interpreter"); 482 assert(is_interpreted_frame(), "interpreted frame expected"); 483 intptr_t bcx = interpreter_frame_bcx(); 484 intptr_t mdx = interpreter_frame_mdx(); 485 486 assert(!is_bci(bcx), "should not access mdp during GC"); 487 return (address)mdx; 488 } 489 490 void frame::interpreter_frame_set_mdp(address mdp) { 491 assert(is_interpreted_frame(), "interpreted frame expected"); 492 if (mdp == NULL) { 493 // Always allow the mdp to be cleared. 494 interpreter_frame_set_mdx((intptr_t)mdp); 495 } 496 intptr_t bcx = interpreter_frame_bcx(); 497 assert(!is_bci(bcx), "should not set mdp during GC"); 498 interpreter_frame_set_mdx((intptr_t)mdp); 499 } 500 501 BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const { 502 assert(is_interpreted_frame(), "Not an interpreted frame"); 503 #ifdef ASSERT 504 interpreter_frame_verify_monitor(current); 505 #endif 506 BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size()); 507 return next; 508 } 509 510 BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const { 511 assert(is_interpreted_frame(), "Not an interpreted frame"); 512 #ifdef ASSERT 513 // // This verification needs to be checked before being enabled 514 // interpreter_frame_verify_monitor(current); 515 #endif 516 BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size()); 517 return previous; 518 } 519 520 // Interpreter locals and expression stack locations. 
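//
// Illustrative sketch (not part of the original sources): for a frame 'fr'
// known to be interpreted, the accessors below give the addresses of raw
// local and expression-stack slots; 'fr' is a placeholder.
//
//   intptr_t raw_local0 = *fr.interpreter_frame_local_at(0);              // local slot 0
//   if (fr.interpreter_frame_expression_stack_size() > 0) {
//     intptr_t raw_stack0 = *fr.interpreter_frame_expression_stack_at(0); // stack slot 0
//   }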
521 522 intptr_t* frame::interpreter_frame_local_at(int index) const { 523 const int n = Interpreter::local_offset_in_bytes(index)/wordSize; 524 return &((*interpreter_frame_locals_addr())[n]); 525 } 526 527 intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const { 528 const int i = offset * interpreter_frame_expression_stack_direction(); 529 const int n = i * Interpreter::stackElementWords; 530 return &(interpreter_frame_expression_stack()[n]); 531 } 532 533 jint frame::interpreter_frame_expression_stack_size() const { 534 // Number of elements on the interpreter expression stack 535 // Callers should span by stackElementWords 536 int element_size = Interpreter::stackElementWords; 537 size_t stack_size = 0; 538 if (frame::interpreter_frame_expression_stack_direction() < 0) { 539 stack_size = (interpreter_frame_expression_stack() - 540 interpreter_frame_tos_address() + 1)/element_size; 541 } else { 542 stack_size = (interpreter_frame_tos_address() - 543 interpreter_frame_expression_stack() + 1)/element_size; 544 } 545 assert( stack_size <= (size_t)max_jint, "stack size too big"); 546 return ((jint)stack_size); 547 } 548 549 550 // (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp) 551 552 const char* frame::print_name() const { 553 if (is_native_frame()) return "Native"; 554 if (is_interpreted_frame()) return "Interpreted"; 555 if (is_compiled_frame()) { 556 if (is_deoptimized_frame()) return "Deoptimized"; 557 return "Compiled"; 558 } 559 if (sp() == NULL) return "Empty"; 560 return "C"; 561 } 562 563 void frame::print_value_on(outputStream* st, JavaThread *thread) const { 564 NOT_PRODUCT(address begin = pc()-40;) 565 NOT_PRODUCT(address end = NULL;) 566 567 st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp()); 568 if (sp() != NULL) 569 st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc()); 570 571 if (StubRoutines::contains(pc())) { 572 st->print_cr(")"); 573 st->print("("); 574 StubCodeDesc* desc = StubCodeDesc::desc_for(pc()); 575 st->print("~Stub::%s", desc->name()); 576 NOT_PRODUCT(begin = desc->begin(); end = desc->end();) 577 } else if (Interpreter::contains(pc())) { 578 st->print_cr(")"); 579 st->print("("); 580 InterpreterCodelet* desc = Interpreter::codelet_containing(pc()); 581 if (desc != NULL) { 582 st->print("~"); 583 desc->print_on(st); 584 NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();) 585 } else { 586 st->print("~interpreter"); 587 } 588 } 589 st->print_cr(")"); 590 591 if (_cb != NULL) { 592 st->print(" "); 593 _cb->print_value_on(st); 594 st->cr(); 595 #ifndef PRODUCT 596 if (end == NULL) { 597 begin = _cb->code_begin(); 598 end = _cb->code_end(); 599 } 600 #endif 601 } 602 NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);) 603 } 604 605 606 void frame::print_on(outputStream* st) const { 607 print_value_on(st,NULL); 608 if (is_interpreted_frame()) { 609 interpreter_frame_print_on(st); 610 } 611 } 612 613 614 void frame::interpreter_frame_print_on(outputStream* st) const { 615 #ifndef PRODUCT 616 assert(is_interpreted_frame(), "Not an interpreted frame"); 617 jint i; 618 for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) { 619 intptr_t x = *interpreter_frame_local_at(i); 620 st->print(" - local [" INTPTR_FORMAT "]", x); 621 st->fill_to(23); 622 st->print_cr("; #%d", i); 623 } 624 for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) { 625 intptr_t x = 
*interpreter_frame_expression_stack_at(i); 626 st->print(" - stack [" INTPTR_FORMAT "]", x); 627 st->fill_to(23); 628 st->print_cr("; #%d", i); 629 } 630 // locks for synchronization 631 for (BasicObjectLock* current = interpreter_frame_monitor_end(); 632 current < interpreter_frame_monitor_begin(); 633 current = next_monitor_in_interpreter_frame(current)) { 634 st->print(" - obj ["); 635 current->obj()->print_value_on(st); 636 st->print_cr("]"); 637 st->print(" - lock ["); 638 current->lock()->print_on(st); 639 st->print_cr("]"); 640 } 641 // monitor 642 st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin()); 643 // bcp 644 st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp()); 645 st->fill_to(23); 646 st->print_cr("; @%d", interpreter_frame_bci()); 647 // locals 648 st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0)); 649 // method 650 st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method()); 651 st->fill_to(23); 652 st->print("; "); 653 interpreter_frame_method()->print_name(st); 654 st->cr(); 655 #endif 656 } 657 658 // Print whether the frame is in the VM or OS indicating a HotSpot problem. 659 // Otherwise, it's likely a bug in the native library that the Java code calls, 660 // hopefully indicating where to submit bugs. 661 void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { 662 // C/C++ frame 663 bool in_vm = os::address_is_in_vm(pc); 664 st->print(in_vm ? "V" : "C"); 665 666 int offset; 667 bool found; 668 669 // libname 670 found = os::dll_address_to_library_name(pc, buf, buflen, &offset); 671 if (found) { 672 // skip directory names 673 const char *p1, *p2; 674 p1 = buf; 675 int len = (int)strlen(os::file_separator()); 676 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 677 st->print(" [%s+0x%x]", p1, offset); 678 } else { 679 st->print(" " PTR_FORMAT, pc); 680 } 681 682 // function name - os::dll_address_to_function_name() may return confusing 683 // names if pc is within jvm.dll or libjvm.so, because JVM only has 684 // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this 685 // only for native libraries. 686 if (!in_vm || Decoder::can_decode_C_frame_in_vm()) { 687 found = os::dll_address_to_function_name(pc, buf, buflen, &offset); 688 689 if (found) { 690 st->print(" %s+0x%x", buf, offset); 691 } 692 } 693 } 694 695 // frame::print_on_error() is called by fatal error handler. Notice that we may 696 // crash inside this function if stack frame is corrupted. The fatal error 697 // handler can catch and handle the crash. Here we assume the frame is valid. 698 // 699 // First letter indicates type of the frame: 700 // J: Java frame (compiled) 701 // j: Java frame (interpreted) 702 // V: VM frame (C/C++) 703 // v: Other frames running VM generated code (e.g. stubs, adapters, etc.) 704 // C: C/C++ frame 705 // 706 // We don't need detailed frame type as that in frame::print_name(). "C" 707 // suggests the problem is in user lib; everything else is likely a VM bug. 
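//
// Illustrative usage sketch (not part of the original sources): an error
// reporter can walk a thread's frames and print one such line per frame;
// 'thread', 'st', 'buf' and 'buflen' are placeholders here.
//
//   RegisterMap map(thread, false);
//   frame fr = thread->last_frame();
//   while (!fr.is_first_frame()) {
//     fr.print_on_error(st, buf, buflen, false /* verbose */);
//     st->cr();
//     fr = fr.sender(&map);
//   }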
708 709 void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const { 710 if (_cb != NULL) { 711 if (Interpreter::contains(pc())) { 712 Method* m = this->interpreter_frame_method(); 713 if (m != NULL) { 714 m->name_and_sig_as_C_string(buf, buflen); 715 st->print("j %s", buf); 716 st->print("+%d", this->interpreter_frame_bci()); 717 } else { 718 st->print("j " PTR_FORMAT, pc()); 719 } 720 } else if (StubRoutines::contains(pc())) { 721 StubCodeDesc* desc = StubCodeDesc::desc_for(pc()); 722 if (desc != NULL) { 723 st->print("v ~StubRoutines::%s", desc->name()); 724 } else { 725 st->print("v ~StubRoutines::" PTR_FORMAT, pc()); 726 } 727 } else if (_cb->is_buffer_blob()) { 728 st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); 729 } else if (_cb->is_nmethod()) { 730 nmethod* nm = (nmethod*)_cb; 731 Method* m = nm->method(); 732 if (m != NULL) { 733 m->name_and_sig_as_C_string(buf, buflen); 734 st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]", 735 nm->compile_id(), (nm->is_osr_method() ? "%" : ""), 736 ((nm->compiler() != NULL) ? nm->compiler()->name() : ""), 737 buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin()); 738 } else { 739 st->print("J " PTR_FORMAT, pc()); 740 } 741 } else if (_cb->is_runtime_stub()) { 742 st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); 743 } else if (_cb->is_deoptimization_stub()) { 744 st->print("v ~DeoptimizationBlob"); 745 } else if (_cb->is_exception_stub()) { 746 st->print("v ~ExceptionBlob"); 747 } else if (_cb->is_safepoint_stub()) { 748 st->print("v ~SafepointBlob"); 749 } else { 750 st->print("v blob " PTR_FORMAT, pc()); 751 } 752 } else { 753 print_C_frame(st, buf, buflen, pc()); 754 } 755 } 756 757 758 /* 759 The interpreter_frame_expression_stack_at method in the case of SPARC needs the 760 max_stack value of the method in order to compute the expression stack address. 761 It uses the Method* in order to get the max_stack value but during GC this 762 Method* value saved on the frame is changed by reverse_and_push and hence cannot 763 be used. So we save the max_stack value in the FrameClosure object and pass it 764 down to the interpreter_frame_expression_stack_at method 765 */ 766 class InterpreterFrameClosure : public OffsetClosure { 767 private: 768 frame* _fr; 769 OopClosure* _f; 770 int _max_locals; 771 int _max_stack; 772 773 public: 774 InterpreterFrameClosure(frame* fr, int max_locals, int max_stack, 775 OopClosure* f) { 776 _fr = fr; 777 _max_locals = max_locals; 778 _max_stack = max_stack; 779 _f = f; 780 } 781 782 void offset_do(int offset) { 783 oop* addr; 784 if (offset < _max_locals) { 785 addr = (oop*) _fr->interpreter_frame_local_at(offset); 786 assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame"); 787 _f->do_oop(addr); 788 } else { 789 addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals)); 790 // In case of exceptions, the expression stack is invalid and the esp will be reset to express 791 // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel). 
792 bool in_stack; 793 if (frame::interpreter_frame_expression_stack_direction() > 0) { 794 in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address(); 795 } else { 796 in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address(); 797 } 798 if (in_stack) { 799 _f->do_oop(addr); 800 } 801 } 802 } 803 804 int max_locals() { return _max_locals; } 805 frame* fr() { return _fr; } 806 }; 807 808 809 class InterpretedArgumentOopFinder: public SignatureInfo { 810 private: 811 OopClosure* _f; // Closure to invoke 812 int _offset; // TOS-relative offset, decremented with each argument 813 bool _has_receiver; // true if the callee has a receiver 814 frame* _fr; 815 816 void set(int size, BasicType type) { 817 _offset -= size; 818 if (type == T_OBJECT || type == T_ARRAY) oop_offset_do(); 819 } 820 821 void oop_offset_do() { 822 oop* addr; 823 addr = (oop*)_fr->interpreter_frame_tos_at(_offset); 824 _f->do_oop(addr); 825 } 826 827 public: 828 InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) { 829 // compute size of arguments 830 int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0); 831 assert(!fr->is_interpreted_frame() || 832 args_size <= fr->interpreter_frame_expression_stack_size(), 833 "args cannot be on stack anymore"); 834 // initialize InterpretedArgumentOopFinder 835 _f = f; 836 _fr = fr; 837 _offset = args_size; 838 } 839 840 void oops_do() { 841 if (_has_receiver) { 842 --_offset; 843 oop_offset_do(); 844 } 845 iterate_parameters(); 846 } 847 }; 848 849 850 // Entry frame has following form (n arguments) 851 // +-----------+ 852 // sp -> | last arg | 853 // +-----------+ 854 // : ::: : 855 // +-----------+ 856 // (sp+n)->| first arg| 857 // +-----------+ 858 859 860 861 // visits and GC's all the arguments in entry frame 862 class EntryFrameOopFinder: public SignatureInfo { 863 private: 864 bool _is_static; 865 int _offset; 866 frame* _fr; 867 OopClosure* _f; 868 869 void set(int size, BasicType type) { 870 assert (_offset >= 0, "illegal offset"); 871 if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset); 872 _offset -= size; 873 } 874 875 void oop_at_offset_do(int offset) { 876 assert (offset >= 0, "illegal offset"); 877 oop* addr = (oop*) _fr->entry_frame_argument_at(offset); 878 _f->do_oop(addr); 879 } 880 881 public: 882 EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) { 883 _f = NULL; // will be set later 884 _fr = frame; 885 _is_static = is_static; 886 _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0 887 } 888 889 void arguments_do(OopClosure* f) { 890 _f = f; 891 if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver 892 iterate_parameters(); 893 } 894 895 }; 896 897 oop* frame::interpreter_callee_receiver_addr(Symbol* signature) { 898 ArgumentSizeComputer asc(signature); 899 int size = asc.size(); 900 return (oop *)interpreter_frame_tos_at(size); 901 } 902 903 904 void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, 905 const RegisterMap* map, bool query_oop_map_cache) { 906 assert(is_interpreted_frame(), "Not an interpreted frame"); 907 assert(map != NULL, "map must be set"); 908 Thread *thread = Thread::current(); 909 methodHandle m (thread, interpreter_frame_method()); 910 jint bci = interpreter_frame_bci(); 911 912 assert(!Universe::heap()->is_in(m()), 913 "must be valid oop"); 914 assert(m->is_method(), "checking frame 
value"); 915 assert((m->is_native() && bci == 0) || 916 (!m->is_native() && bci >= 0 && bci < m->code_size()), 917 "invalid bci value"); 918 919 // Handle the monitor elements in the activation 920 for ( 921 BasicObjectLock* current = interpreter_frame_monitor_end(); 922 current < interpreter_frame_monitor_begin(); 923 current = next_monitor_in_interpreter_frame(current) 924 ) { 925 #ifdef ASSERT 926 interpreter_frame_verify_monitor(current); 927 #endif 928 current->oops_do(f); 929 } 930 931 // process fixed part 932 if (cld_f != NULL) { 933 // The method pointer in the frame might be the only path to the method's 934 // klass, and the klass needs to be kept alive while executing. The GCs 935 // don't trace through method pointers, so typically in similar situations 936 // the mirror or the class loader of the klass are installed as a GC root. 937 // To minimize the overhead of doing that here, we ask the GC to pass down a 938 // closure that knows how to keep klasses alive given a ClassLoaderData. 939 cld_f->do_cld(m->method_holder()->class_loader_data()); 940 } 941 942 if (m->is_native() PPC32_ONLY(&& m->is_static())) { 943 f->do_oop(interpreter_frame_temp_oop_addr()); 944 } 945 946 int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals(); 947 948 Symbol* signature = NULL; 949 bool has_receiver = false; 950 951 // Process a callee's arguments if we are at a call site 952 // (i.e., if we are at an invoke bytecode) 953 // This is used sometimes for calling into the VM, not for another 954 // interpreted or compiled frame. 955 if (!m->is_native()) { 956 Bytecode_invoke call = Bytecode_invoke_check(m, bci); 957 if (call.is_valid()) { 958 signature = call.signature(); 959 has_receiver = call.has_receiver(); 960 if (map->include_argument_oops() && 961 interpreter_frame_expression_stack_size() > 0) { 962 ResourceMark rm(thread); // is this right ??? 963 // we are at a call site & the expression stack is not empty 964 // => process callee's arguments 965 // 966 // Note: The expression stack can be empty if an exception 967 // occurred during method resolution/execution. In all 968 // cases we empty the expression stack completely be- 969 // fore handling the exception (the exception handling 970 // code in the interpreter calls a blocking runtime 971 // routine which can cause this code to be executed). 972 // (was bug gri 7/27/98) 973 oops_interpreted_arguments_do(signature, has_receiver, f); 974 } 975 } 976 } 977 978 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f); 979 980 // process locals & expression stack 981 InterpreterOopMap mask; 982 if (query_oop_map_cache) { 983 m->mask_for(bci, &mask); 984 } else { 985 OopMapCache::compute_one_oop_map(m, bci, &mask); 986 } 987 mask.iterate_oop(&blk); 988 } 989 990 991 void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) { 992 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f); 993 finder.oops_do(); 994 } 995 996 void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { 997 assert(_cb != NULL, "sanity check"); 998 if (_cb->oop_maps() != NULL) { 999 OopMapSet::oops_do(this, reg_map, f); 1000 1001 // Preserve potential arguments for a callee. We handle this by dispatching 1002 // on the codeblob. 
For c2i, we do 1003 if (reg_map->include_argument_oops()) { 1004 _cb->preserve_callee_argument_oops(*this, reg_map, f); 1005 } 1006 } 1007 // In cases where perm gen is collected, GC will want to mark 1008 // oops referenced from nmethods active on thread stacks so as to 1009 // prevent them from being collected. However, this visit should be 1010 // restricted to certain phases of the collection only. The 1011 // closure decides how it wants nmethods to be traced. 1012 if (cf != NULL) 1013 cf->do_code_blob(_cb); 1014 } 1015 1016 class CompiledArgumentOopFinder: public SignatureInfo { 1017 protected: 1018 OopClosure* _f; 1019 int _offset; // the current offset, incremented with each argument 1020 bool _has_receiver; // true if the callee has a receiver 1021 bool _has_appendix; // true if the call has an appendix 1022 frame _fr; 1023 RegisterMap* _reg_map; 1024 int _arg_size; 1025 VMRegPair* _regs; // VMReg list of arguments 1026 1027 void set(int size, BasicType type) { 1028 if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset(); 1029 _offset += size; 1030 } 1031 1032 virtual void handle_oop_offset() { 1033 // Extract low order register number from register array. 1034 // In LP64-land, the high-order bits are valid but unhelpful. 1035 VMReg reg = _regs[_offset].first(); 1036 oop *loc = _fr.oopmapreg_to_location(reg, _reg_map); 1037 _f->do_oop(loc); 1038 } 1039 1040 public: 1041 CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map) 1042 : SignatureInfo(signature) { 1043 1044 // initialize CompiledArgumentOopFinder 1045 _f = f; 1046 _offset = 0; 1047 _has_receiver = has_receiver; 1048 _has_appendix = has_appendix; 1049 _fr = fr; 1050 _reg_map = (RegisterMap*)reg_map; 1051 _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0); 1052 1053 int arg_size; 1054 _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size); 1055 assert(arg_size == _arg_size, "wrong arg size"); 1056 } 1057 1058 void oops_do() { 1059 if (_has_receiver) { 1060 handle_oop_offset(); 1061 _offset++; 1062 } 1063 iterate_parameters(); 1064 if (_has_appendix) { 1065 handle_oop_offset(); 1066 _offset++; 1067 } 1068 } 1069 }; 1070 1071 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) { 1072 ResourceMark rm; 1073 CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map); 1074 finder.oops_do(); 1075 } 1076 1077 1078 // Get receiver out of callers frame, i.e. find parameter 0 in callers 1079 // frame. Consult ADLC for where parameter 0 is to be found. Then 1080 // check local reg_map for it being a callee-save register or argument 1081 // register, both of which are saved in the local frame. If not found 1082 // there, it must be an in-stack argument of the caller. 1083 // Note: caller.sp() points to callee-arguments 1084 oop frame::retrieve_receiver(RegisterMap* reg_map) { 1085 frame caller = *this; 1086 1087 // First consult the ADLC on where it puts parameter 0 for this signature. 
1088 VMReg reg = SharedRuntime::name_for_receiver(); 1089 oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map); 1090 if (oop_adr == NULL) { 1091 guarantee(oop_adr != NULL, "bad register save location"); 1092 return NULL; 1093 } 1094 oop r = *oop_adr; 1095 assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r)); 1096 return r; 1097 } 1098 1099 1100 oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const { 1101 if(reg->is_reg()) { 1102 // If it is passed in a register, it got spilled in the stub frame. 1103 return (oop *)reg_map->location(reg); 1104 } else { 1105 int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size; 1106 return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes); 1107 } 1108 } 1109 1110 BasicLock* frame::get_native_monitor() { 1111 nmethod* nm = (nmethod*)_cb; 1112 assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(), 1113 "Should not call this unless it's a native nmethod"); 1114 int byte_offset = in_bytes(nm->native_basic_lock_sp_offset()); 1115 assert(byte_offset >= 0, "should not see invalid offset"); 1116 return (BasicLock*) &sp()[byte_offset / wordSize]; 1117 } 1118 1119 oop frame::get_native_receiver() { 1120 nmethod* nm = (nmethod*)_cb; 1121 assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(), 1122 "Should not call this unless it's a native nmethod"); 1123 int byte_offset = in_bytes(nm->native_receiver_sp_offset()); 1124 assert(byte_offset >= 0, "should not see invalid offset"); 1125 oop owner = ((oop*) sp())[byte_offset / wordSize]; 1126 assert( Universe::heap()->is_in(owner), "bad receiver" ); 1127 return owner; 1128 } 1129 1130 void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) { 1131 assert(map != NULL, "map must be set"); 1132 if (map->include_argument_oops()) { 1133 // must collect argument oops, as nobody else is doing it 1134 Thread *thread = Thread::current(); 1135 methodHandle m (thread, entry_frame_call_wrapper()->callee_method()); 1136 EntryFrameOopFinder finder(this, m->signature(), m->is_static()); 1137 finder.arguments_do(f); 1138 } 1139 // Traverse the Handle Block saved in the entry frame 1140 entry_frame_call_wrapper()->oops_do(f); 1141 } 1142 1143 1144 void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) { 1145 #ifndef PRODUCT 1146 // simulate GC crash here to dump java thread in error report 1147 if (CrashGCForDumpingJavaThread) { 1148 char *t = NULL; 1149 *t = 'c'; 1150 } 1151 #endif 1152 if (is_interpreted_frame()) { 1153 oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache); 1154 } else if (is_entry_frame()) { 1155 oops_entry_do(f, map); 1156 } else if (CodeCache::contains(pc())) { 1157 oops_code_blob_do(f, cf, map); 1158 #ifdef SHARK 1159 } else if (is_fake_stub_frame()) { 1160 // nothing to do 1161 #endif // SHARK 1162 } else { 1163 ShouldNotReachHere(); 1164 } 1165 } 1166 1167 void frame::nmethods_do(CodeBlobClosure* cf) { 1168 if (_cb != NULL && _cb->is_nmethod()) { 1169 cf->do_code_blob(_cb); 1170 } 1171 } 1172 1173 1174 // call f() on the interpreted Method*s in the stack. 1175 // Have to walk the entire code cache for the compiled frames Yuck. 
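//
// Illustrative sketch (not part of the original sources): metadata_do() takes
// a plain function pointer, e.g.
//
//   static void note_metadata(Metadata* md) { /* record md somewhere */ }
//   ...
//   fr.metadata_do(note_metadata);   // 'fr' is a placeholder frame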
1176 void frame::metadata_do(void f(Metadata*)) { 1177 if (_cb != NULL && Interpreter::contains(pc())) { 1178 Method* m = this->interpreter_frame_method(); 1179 assert(m != NULL, "huh?"); 1180 f(m); 1181 } 1182 } 1183 1184 void frame::gc_prologue() { 1185 if (is_interpreted_frame()) { 1186 // set bcx to bci to become Method* position independent during GC 1187 interpreter_frame_set_bcx(interpreter_frame_bci()); 1188 } 1189 } 1190 1191 1192 void frame::gc_epilogue() { 1193 if (is_interpreted_frame()) { 1194 // set bcx back to bcp for interpreter 1195 interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp()); 1196 } 1197 // call processor specific epilog function 1198 pd_gc_epilog(); 1199 } 1200 1201 1202 # ifdef ENABLE_ZAP_DEAD_LOCALS 1203 1204 void frame::CheckValueClosure::do_oop(oop* p) { 1205 if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) { 1206 warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current()); 1207 } 1208 } 1209 frame::CheckValueClosure frame::_check_value; 1210 1211 1212 void frame::CheckOopClosure::do_oop(oop* p) { 1213 if (*p != NULL && !(*p)->is_oop()) { 1214 warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current()); 1215 } 1216 } 1217 frame::CheckOopClosure frame::_check_oop; 1218 1219 void frame::check_derived_oop(oop* base, oop* derived) { 1220 _check_oop.do_oop(base); 1221 } 1222 1223 1224 void frame::ZapDeadClosure::do_oop(oop* p) { 1225 if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p); 1226 *p = cast_to_oop<intptr_t>(0xbabebabe); 1227 } 1228 frame::ZapDeadClosure frame::_zap_dead; 1229 1230 void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) { 1231 assert(thread == Thread::current(), "need to synchronize to do this to another thread"); 1232 // Tracing - part 1 1233 if (TraceZapDeadLocals) { 1234 ResourceMark rm(thread); 1235 tty->print_cr("--------------------------------------------------------------------------------"); 1236 tty->print("Zapping dead locals in "); 1237 print_on(tty); 1238 tty->cr(); 1239 } 1240 // Zapping 1241 if (is_entry_frame ()) zap_dead_entry_locals (thread, map); 1242 else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map); 1243 else if (is_compiled_frame()) zap_dead_compiled_locals (thread, map); 1244 1245 else 1246 // could be is_runtime_frame 1247 // so remove error: ShouldNotReachHere(); 1248 ; 1249 // Tracing - part 2 1250 if (TraceZapDeadLocals) { 1251 tty->cr(); 1252 } 1253 } 1254 1255 1256 void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) { 1257 // get current interpreter 'pc' 1258 assert(is_interpreted_frame(), "Not an interpreted frame"); 1259 Method* m = interpreter_frame_method(); 1260 int bci = interpreter_frame_bci(); 1261 1262 int max_locals = m->is_native() ? 
m->size_of_parameters() : m->max_locals(); 1263 1264 // process dynamic part 1265 InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(), 1266 &_check_value); 1267 InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(), 1268 &_check_oop ); 1269 InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(), 1270 &_zap_dead ); 1271 1272 // get frame map 1273 InterpreterOopMap mask; 1274 m->mask_for(bci, &mask); 1275 mask.iterate_all( &oop_blk, &value_blk, &dead_blk); 1276 } 1277 1278 1279 void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) { 1280 1281 ResourceMark rm(thread); 1282 assert(_cb != NULL, "sanity check"); 1283 if (_cb->oop_maps() != NULL) { 1284 OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value); 1285 } 1286 } 1287 1288 1289 void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) { 1290 if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented"); 1291 } 1292 1293 1294 void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) { 1295 if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented"); 1296 } 1297 1298 # endif // ENABLE_ZAP_DEAD_LOCALS 1299 1300 void frame::verify(const RegisterMap* map) { 1301 // for now make sure receiver type is correct 1302 if (is_interpreted_frame()) { 1303 Method* method = interpreter_frame_method(); 1304 guarantee(method->is_method(), "method is wrong in frame::verify"); 1305 if (!method->is_static()) { 1306 // fetch the receiver 1307 oop* p = (oop*) interpreter_frame_local_at(0); 1308 // make sure we have the right receiver type 1309 } 1310 } 1311 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");) 1312 oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false); 1313 } 1314 1315 1316 #ifdef ASSERT 1317 bool frame::verify_return_pc(address x) { 1318 if (StubRoutines::returns_to_call_stub(x)) { 1319 return true; 1320 } 1321 if (CodeCache::contains(x)) { 1322 return true; 1323 } 1324 if (Interpreter::contains(x)) { 1325 return true; 1326 } 1327 return false; 1328 } 1329 #endif 1330 1331 #ifdef ASSERT 1332 void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const { 1333 assert(is_interpreted_frame(), "Not an interpreted frame"); 1334 // verify that the value is in the right part of the frame 1335 address low_mark = (address) interpreter_frame_monitor_end(); 1336 address high_mark = (address) interpreter_frame_monitor_begin(); 1337 address current = (address) value; 1338 1339 const int monitor_size = frame::interpreter_frame_monitor_size(); 1340 guarantee((high_mark - current) % monitor_size == 0 , "Misaligned top of BasicObjectLock*"); 1341 guarantee( high_mark > current , "Current BasicObjectLock* higher than high_mark"); 1342 1343 guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*"); 1344 guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark"); 1345 } 1346 #endif 1347 1348 #ifndef PRODUCT 1349 void frame::describe(FrameValues& values, int frame_no) { 1350 // boundaries: sp and the 'real' frame pointer 1351 values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1); 1352 intptr_t* frame_pointer = real_fp(); // Note: may differ from fp() 1353 1354 // print frame info at the highest boundary 1355 intptr_t* info_address = MAX2(sp(), frame_pointer); 1356 1357 if (info_address != frame_pointer) { 1358 // print frame_pointer explicitly if not marked by the 
frame info 1359 values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1); 1360 } 1361 1362 if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) { 1363 // Label values common to most frames 1364 values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no)); 1365 } 1366 1367 if (is_interpreted_frame()) { 1368 Method* m = interpreter_frame_method(); 1369 int bci = interpreter_frame_bci(); 1370 1371 // Label the method and current bci 1372 values.describe(-1, info_address, 1373 FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2); 1374 values.describe(-1, info_address, 1375 err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1); 1376 if (m->max_locals() > 0) { 1377 intptr_t* l0 = interpreter_frame_local_at(0); 1378 intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1); 1379 values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1); 1380 // Report each local and mark as owned by this frame 1381 for (int l = 0; l < m->max_locals(); l++) { 1382 intptr_t* l0 = interpreter_frame_local_at(l); 1383 values.describe(frame_no, l0, err_msg("local %d", l)); 1384 } 1385 } 1386 1387 // Compute the actual expression stack size 1388 InterpreterOopMap mask; 1389 OopMapCache::compute_one_oop_map(m, bci, &mask); 1390 intptr_t* tos = NULL; 1391 // Report each stack element and mark as owned by this frame 1392 for (int e = 0; e < mask.expression_stack_size(); e++) { 1393 tos = MAX2(tos, interpreter_frame_expression_stack_at(e)); 1394 values.describe(frame_no, interpreter_frame_expression_stack_at(e), 1395 err_msg("stack %d", e)); 1396 } 1397 if (tos != NULL) { 1398 values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1); 1399 } 1400 if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) { 1401 values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin"); 1402 values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end"); 1403 } 1404 } else if (is_entry_frame()) { 1405 // For now just label the frame 1406 values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2); 1407 } else if (is_compiled_frame()) { 1408 // For now just label the frame 1409 nmethod* nm = cb()->as_nmethod_or_null(); 1410 values.describe(-1, info_address, 1411 FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no, 1412 nm, nm->method()->name_and_sig_as_C_string(), 1413 (_deopt_state == is_deoptimized) ? 1414 " (deoptimized)" : 1415 ((_deopt_state == unknown) ? 
" (state unknown)" : "")), 1416 2); 1417 } else if (is_native_frame()) { 1418 // For now just label the frame 1419 nmethod* nm = cb()->as_nmethod_or_null(); 1420 values.describe(-1, info_address, 1421 FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no, 1422 nm, nm->method()->name_and_sig_as_C_string()), 2); 1423 } else { 1424 // provide default info if not handled before 1425 char *info = (char *) "special frame"; 1426 if ((_cb != NULL) && 1427 (_cb->name() != NULL)) { 1428 info = (char *)_cb->name(); 1429 } 1430 values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2); 1431 } 1432 1433 // platform dependent additional data 1434 describe_pd(values, frame_no); 1435 } 1436 1437 #endif 1438 1439 1440 //----------------------------------------------------------------------------------- 1441 // StackFrameStream implementation 1442 1443 StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) { 1444 assert(thread->has_last_Java_frame(), "sanity check"); 1445 _fr = thread->last_frame(); 1446 _is_done = false; 1447 } 1448 1449 1450 #ifndef PRODUCT 1451 1452 void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) { 1453 FrameValue fv; 1454 fv.location = location; 1455 fv.owner = owner; 1456 fv.priority = priority; 1457 fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1); 1458 strcpy(fv.description, description); 1459 _values.append(fv); 1460 } 1461 1462 1463 #ifdef ASSERT 1464 void FrameValues::validate() { 1465 _values.sort(compare); 1466 bool error = false; 1467 FrameValue prev; 1468 prev.owner = -1; 1469 for (int i = _values.length() - 1; i >= 0; i--) { 1470 FrameValue fv = _values.at(i); 1471 if (fv.owner == -1) continue; 1472 if (prev.owner == -1) { 1473 prev = fv; 1474 continue; 1475 } 1476 if (prev.location == fv.location) { 1477 if (fv.owner != prev.owner) { 1478 tty->print_cr("overlapping storage"); 1479 tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description); 1480 tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description); 1481 error = true; 1482 } 1483 } else { 1484 prev = fv; 1485 } 1486 } 1487 assert(!error, "invalid layout"); 1488 } 1489 #endif // ASSERT 1490 1491 void FrameValues::print(JavaThread* thread) { 1492 _values.sort(compare); 1493 1494 // Sometimes values like the fp can be invalid values if the 1495 // register map wasn't updated during the walk. Trim out values 1496 // that aren't actually in the stack of the thread. 
1497 int min_index = 0; 1498 int max_index = _values.length() - 1; 1499 intptr_t* v0 = _values.at(min_index).location; 1500 intptr_t* v1 = _values.at(max_index).location; 1501 1502 if (thread == Thread::current()) { 1503 while (!thread->is_in_stack((address)v0)) { 1504 v0 = _values.at(++min_index).location; 1505 } 1506 while (!thread->is_in_stack((address)v1)) { 1507 v1 = _values.at(--max_index).location; 1508 } 1509 } else { 1510 while (!thread->on_local_stack((address)v0)) { 1511 v0 = _values.at(++min_index).location; 1512 } 1513 while (!thread->on_local_stack((address)v1)) { 1514 v1 = _values.at(--max_index).location; 1515 } 1516 } 1517 intptr_t* min = MIN2(v0, v1); 1518 intptr_t* max = MAX2(v0, v1); 1519 intptr_t* cur = max; 1520 intptr_t* last = NULL; 1521 for (int i = max_index; i >= min_index; i--) { 1522 FrameValue fv = _values.at(i); 1523 while (cur > fv.location) { 1524 tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur); 1525 cur--; 1526 } 1527 if (last == fv.location) { 1528 const char* spacer = " " LP64_ONLY(" "); 1529 tty->print_cr(" %s %s %s", spacer, spacer, fv.description); 1530 } else { 1531 tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description); 1532 last = fv.location; 1533 cur--; 1534 } 1535 } 1536 } 1537 1538 #endif // ndef PRODUCT
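
// Illustrative usage sketch (not part of the original sources): iterating the
// frames of a thread with StackFrameStream, which pairs a frame with the
// RegisterMap needed to find its sender; 'thread' is a placeholder JavaThread
// that is known to have a last Java frame.
//
//   for (StackFrameStream fst(thread, true /* update map */); !fst.is_done(); fst.next()) {
//     frame* fr = fst.current();
//     if (fr->is_java_frame()) {
//       fr->print_on(tty);
//     }
//   }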