/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/decoder.hpp"

#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
  _thread     = thread;
  _update_map = update_map;
  clear();
  debug_only(_update_for_id = NULL;)
#ifndef PRODUCT
  for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
#endif /* PRODUCT */
}

RegisterMap::RegisterMap(const RegisterMap* map) {
  assert(map != this, "bad initialization parameter");
  assert(map != NULL, "RegisterMap must be present");
  _thread                = map->thread();
  _update_map            = map->update_map();
  _include_argument_oops = map->include_argument_oops();
  debug_only(_update_for_id = map->_update_for_id;)
  pd_initialize_from(map);
  if (update_map()) {
    for(int i = 0; i < location_valid_size; i++) {
      LocationValidType bits = !update_map() ?
                                 0 : map->_location_valid[i];
      _location_valid[i] = bits;
      // for whichever bits are set, pull in the corresponding map->_location
      int j = i*location_valid_type_size;
      while (bits != 0) {
        if ((bits & 1) != 0) {
          assert(0 <= j && j < reg_count, "range check");
          _location[j] = map->_location[j];
        }
        bits >>= 1;
        j += 1;
      }
    }
  }
}

void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (_update_map) {
    for(int i = 0; i < location_valid_size; i++) {
      _location_valid[i] = 0;
    }
    pd_clear();
  } else {
    pd_initialize();
  }
}

#ifndef PRODUCT

void RegisterMap::print_on(outputStream* st) const {
  st->print_cr("Register map");
  for(int i = 0; i < reg_count; i++) {

    VMReg r = VMRegImpl::as_VMReg(i);
    intptr_t* src = (intptr_t*) location(r);
    if (src != NULL) {

      r->print_on(st);
      st->print(" [" INTPTR_FORMAT "] = ", src);
      if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
        st->print_cr("<misaligned>");
      } else {
        st->print_cr(INTPTR_FORMAT, *src);
      }
    }
  }
}

void RegisterMap::print() const {
  print_on(tty);
}

#endif
// This returns the pc that you would see if you were in the debugger, not
// the idealized value in the frame object. This undoes the magic conversion
// that happens for deoptimized frames. In addition it makes the value the
// hardware would want to see in the native frame. The only user (at this point)
// is deoptimization. Likely no one else should ever use it.

address frame::raw_pc() const {
  if (is_deoptimized_frame()) {
    nmethod* nm = cb()->as_nmethod_or_null();
    if (nm->is_method_handle_return(pc()))
      return nm->deopt_mh_handler_begin() - pc_return_offset;
    else
      return nm->deopt_handler_begin() - pc_return_offset;
  } else {
    return (pc() - pc_return_offset);
  }
}

// Change the pc in a frame object. This does not change the actual pc in
// the actual frame. To do that use patch_pc.
//
void frame::set_pc(address newpc ) {
#ifdef ASSERT
  if (_cb != NULL && _cb->is_nmethod()) {
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
  }
#endif // ASSERT

  // Unsafe to use the is_deoptimized tester after changing pc
  _deopt_state = unknown;
  _pc = newpc;
  _cb = CodeCache::find_blob_unsafe(_pc);

}

// type testers
bool frame::is_ignored_frame() const {
  return false;  // FIXME: some LambdaForm frames should be ignored
}
bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  return _deopt_state == is_deoptimized;
}

bool frame::is_native_frame() const {
  return (_cb != NULL &&
          _cb->is_nmethod() &&
          ((nmethod*)_cb)->is_native_method());
}

bool frame::is_java_frame() const {
  if (is_interpreted_frame()) return true;
  if (is_compiled_frame())    return true;
  return false;
}


bool frame::is_compiled_frame() const {
  if (_cb != NULL &&
      _cb->is_nmethod() &&
      ((nmethod*)_cb)->is_java_method()) {
    return true;
  }
  return false;
}


bool frame::is_runtime_frame() const {
  return (_cb != NULL && _cb->is_runtime_stub());
}

bool frame::is_safepoint_blob_frame() const {
  return (_cb != NULL && _cb->is_safepoint_stub());
}

// testers

bool frame::is_first_java_frame() const {
  RegisterMap map(JavaThread::current(), false); // No update
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
  return s.is_first_frame();
}


bool frame::entry_frame_is_first() const {
  return entry_frame_call_wrapper()->is_first_frame();
}

JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const {
  JavaCallWrapper** jcw = entry_frame_call_wrapper_addr();
  address addr = (address) jcw;

  // addr must be within the usable part of the stack
  if (thread->is_in_usable_stack(addr)) {
    return *jcw;
  }

  return NULL;
}

bool frame::should_be_deoptimized() const {
  if (_deopt_state == is_deoptimized ||
      !is_compiled_frame() ) return false;
  assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
  nmethod* nm = (nmethod *)_cb;
  if (TraceDependencies) {
    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
    nm->print_value_on(tty);
    tty->cr();
  }

  if( !nm->is_marked_for_deoptimization() )
    return false;

  // If at the return point, then the frame has already been popped, and
  // only the return needs to be executed. Don't deoptimize here.
  return !nm->is_at_poll_return(pc());
}

bool frame::can_be_deoptimized() const {
  if (!is_compiled_frame()) return false;
  nmethod* nm = (nmethod*)_cb;

  if( !nm->can_be_deoptimized() )
    return false;

  return !nm->is_at_poll_return(pc());
}

void frame::deoptimize(JavaThread* thread) {
  // Schedule deoptimization of an nmethod activation with this frame.
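  // The request is lazy: the frame's current pc is saved as the nmethod's
  // "original pc" and the frame's pc is patched to point at the nmethod's
  // deopt handler (see the set_original_pc()/patch_pc() calls below), so the
  // activation deoptimizes itself when control returns to it.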
  assert(_cb != NULL && _cb->is_nmethod(), "must be");
  nmethod* nm = (nmethod*)_cb;

  // This is a fix for register window patching race
  if (NeedsDeoptSuspend && Thread::current() != thread) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "patching other threads for deopt may only occur at a safepoint");

    // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
    // we could see the frame again and ask for it to be deoptimized since
    // it might move for a long time. That is harmless and we just ignore it.
    if (id() == thread->must_deopt_id()) {
      assert(thread->is_deopt_suspend(), "lost suspension");
      return;
    }

    // We are at a safepoint so the target thread can only be
    // in 4 states:
    //  blocked - no problem
    //  blocked_trans - no problem (i.e. could have woken up from blocked
    //                  during a safepoint).
    //  native - register window pc patching race
    //  native_trans - momentary state
    //
    // We could just wait out a thread in native_trans to block.
    // Then we'd have all the issues that the safepoint code has as to
    // whether to spin or block. It isn't worth it. Just treat it like
    // native and be done with it.
    //
    // Examine the state of the thread at the start of safepoint since
    // threads that were in native at the start of the safepoint could
    // come to a halt during the safepoint, changing the current value
    // of the safepoint_state.
    JavaThreadState state = thread->safepoint_state()->orig_thread_state();
    if (state == _thread_in_native || state == _thread_in_native_trans) {
      // Since we are at a safepoint the target thread will stop itself
      // before it can return to java as long as we remain at the safepoint.
      // Therefore we can put an additional request for the thread to stop
      // no matter what (like a suspend). This will cause the thread
      // to notice it needs to do the deopt on its own once it leaves native.
      //
      // The only reason we must do this is because on machines with register
      // windows we have a race with patching the return address and the
      // window coming live as the thread returns to the Java code (but still
      // in native mode) and then blocks. It is only this top most frame
      // that is at risk. So in truth we could add an additional check to
      // see if this frame is one that is at risk.
      RegisterMap map(thread, false);
      frame at_risk = thread->last_frame().sender(&map);
      if (id() == at_risk.id()) {
        thread->set_must_deopt_id(id());
        thread->set_deopt_suspend();
        return;
      }
    }
  } // NeedsDeoptSuspend


  // If the call site is a MethodHandle call site use the MH deopt
  // handler.
  address deopt = nm->is_method_handle_return(pc()) ?
                    nm->deopt_mh_handler_begin() :
                    nm->deopt_handler_begin();

  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());
  patch_pc(thread, deopt);

#ifdef ASSERT
  {
    RegisterMap map(thread, false);
    frame check = thread->last_frame();
    while (id() != check.id()) {
      check = check.sender(&map);
    }
    assert(check.is_deoptimized_frame(), "missed deopt");
  }
#endif // ASSERT
}

frame frame::java_sender() const {
  RegisterMap map(JavaThread::current(), false);
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ;
  guarantee(s.is_java_frame(), "tried to get caller of first java frame");
  return s;
}

frame frame::real_sender(RegisterMap* map) const {
  frame result = sender(map);
  while (result.is_runtime_frame() ||
         result.is_ignored_frame()) {
    result = result.sender(map);
  }
  return result;
}

// Note: called by profiler - NOT for current thread
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
  // If we don't recognize this frame, walk back up the stack until we do
  RegisterMap map(thread, false);
  frame first_java_frame = frame();

  // Find the first Java frame on the stack starting with input frame
  if (is_java_frame()) {
    // top frame is compiled frame or deoptimized frame
    first_java_frame = *this;
  } else if (safe_for_sender(thread)) {
    for (frame sender_frame = sender(&map);
         sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
         sender_frame = sender_frame.sender(&map)) {
      if (sender_frame.is_java_frame()) {
        first_java_frame = sender_frame;
        break;
      }
    }
  }
  return first_java_frame;
}

// Interpreter frames


void frame::interpreter_frame_set_locals(intptr_t* locs)  {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  *interpreter_frame_locals_addr() = locs;
}

Method* frame::interpreter_frame_method() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* m = *interpreter_frame_method_addr();
  assert(m->is_method(), "not a Method*");
  return m;
}

void frame::interpreter_frame_set_method(Method* method) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  *interpreter_frame_method_addr() = method;
}

void frame::interpreter_frame_set_bcx(intptr_t bcx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  if (ProfileInterpreter) {
    bool formerly_bci = is_bci(interpreter_frame_bcx());
    bool is_now_bci = is_bci(bcx);
    *interpreter_frame_bcx_addr() = bcx;

    intptr_t mdx = interpreter_frame_mdx();

    if (mdx != 0) {
      if (formerly_bci) {
        if (!is_now_bci) {
          // The bcx was just converted from bci to bcp.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
          address mdp = mdo->di_to_dp(mdi);
          interpreter_frame_set_mdx((intptr_t)mdp);
        }
      } else {
        if (is_now_bci) {
          // The bcx was just converted from bcp to bci.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdo->dp_to_di((address)mdx);
          interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
        }
      }
    }
  } else {
    *interpreter_frame_bcx_addr() = bcx;
  }
}

jint frame::interpreter_frame_bci() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
}

void frame::interpreter_frame_set_bci(jint bci) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
  interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
}

address frame::interpreter_frame_bcp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
}

void frame::interpreter_frame_set_bcp(address bcp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
  interpreter_frame_set_bcx((intptr_t)bcp);
}

void frame::interpreter_frame_set_mdx(intptr_t mdx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(ProfileInterpreter, "must be profiling interpreter");
  *interpreter_frame_mdx_addr() = mdx;
}

address frame::interpreter_frame_mdp() const {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  intptr_t mdx = interpreter_frame_mdx();

  assert(!is_bci(bcx), "should not access mdp during GC");
  return (address)mdx;
}

void frame::interpreter_frame_set_mdp(address mdp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  if (mdp == NULL) {
    // Always allow the mdp to be cleared.
    interpreter_frame_set_mdx((intptr_t)mdp);
  }
  intptr_t bcx = interpreter_frame_bcx();
  assert(!is_bci(bcx), "should not set mdp during GC");
  interpreter_frame_set_mdx((intptr_t)mdp);
}

BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
  interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
  return next;
}

BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
//   // This verification needs to be checked before being enabled
//   interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
  return previous;
}

// Interpreter locals and expression stack locations.
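//
// A note on addressing (derived from the accessors below): locals are indexed
// via Interpreter::local_offset_in_bytes(), while the expression stack may grow
// towards lower or higher addresses depending on the platform. The sign of
// interpreter_frame_expression_stack_direction() abstracts over that, and is
// also what interpreter_frame_expression_stack_size() uses to compute the
// current depth relative to the tos address.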

intptr_t* frame::interpreter_frame_local_at(int index) const {
  const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
  return &((*interpreter_frame_locals_addr())[n]);
}

intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
  const int i = offset * interpreter_frame_expression_stack_direction();
  const int n = i * Interpreter::stackElementWords;
  return &(interpreter_frame_expression_stack()[n]);
}

jint frame::interpreter_frame_expression_stack_size() const {
  // Number of elements on the interpreter expression stack
  // Callers should span by stackElementWords
  int element_size = Interpreter::stackElementWords;
  size_t stack_size = 0;
  if (frame::interpreter_frame_expression_stack_direction() < 0) {
    stack_size = (interpreter_frame_expression_stack() -
                  interpreter_frame_tos_address() + 1)/element_size;
  } else {
    stack_size = (interpreter_frame_tos_address() -
                  interpreter_frame_expression_stack() + 1)/element_size;
  }
  assert( stack_size <= (size_t)max_jint, "stack size too big");
  return ((jint)stack_size);
}


// (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)

const char* frame::print_name() const {
  if (is_native_frame())      return "Native";
  if (is_interpreted_frame()) return "Interpreted";
  if (is_compiled_frame()) {
    if (is_deoptimized_frame()) return "Deoptimized";
    return "Compiled";
  }
  if (sp() == NULL)           return "Empty";
  return "C";
}

void frame::print_value_on(outputStream* st, JavaThread *thread) const {
  NOT_PRODUCT(address begin = pc()-40;)
  NOT_PRODUCT(address end   = NULL;)

  st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
  if (sp() != NULL)
    st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());

  if (StubRoutines::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
    st->print("~Stub::%s", desc->name());
    NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
  } else if (Interpreter::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
    if (desc != NULL) {
      st->print("~");
      desc->print_on(st);
      NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
    } else {
      st->print("~interpreter");
    }
  }
  st->print_cr(")");

  if (_cb != NULL) {
    st->print(" ");
    _cb->print_value_on(st);
    st->cr();
#ifndef PRODUCT
    if (end == NULL) {
      begin = _cb->code_begin();
      end   = _cb->code_end();
    }
#endif
  }
  NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
}


void frame::print_on(outputStream* st) const {
  print_value_on(st,NULL);
  if (is_interpreted_frame()) {
    interpreter_frame_print_on(st);
  }
}


void frame::interpreter_frame_print_on(outputStream* st) const {
#ifndef PRODUCT
  assert(is_interpreted_frame(), "Not an interpreted frame");
  jint i;
  for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
    intptr_t x = *interpreter_frame_local_at(i);
    st->print(" - local [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
    intptr_t x =
      *interpreter_frame_expression_stack_at(i);
    st->print(" - stack [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  // locks for synchronization
  for (BasicObjectLock* current = interpreter_frame_monitor_end();
       current < interpreter_frame_monitor_begin();
       current = next_monitor_in_interpreter_frame(current)) {
    st->print(" - obj [");
    current->obj()->print_value_on(st);
    st->print_cr("]");
    st->print(" - lock [");
    current->lock()->print_on(st);
    st->print_cr("]");
  }
  // monitor
  st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
  // bcp
  st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp());
  st->fill_to(23);
  st->print_cr("; @%d", interpreter_frame_bci());
  // locals
  st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
  // method
  st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
  st->fill_to(23);
  st->print("; ");
  interpreter_frame_method()->print_name(st);
  st->cr();
#endif
}

// Print whether the frame is in the VM or OS indicating a HotSpot problem.
// Otherwise, it's likely a bug in the native library that the Java code calls,
// hopefully indicating where to submit bugs.
void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
  // C/C++ frame
  bool in_vm = os::address_is_in_vm(pc);
  st->print(in_vm ? "V" : "C");

  int offset;
  bool found;

  // libname
  found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    int len = (int)strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    st->print(" [%s+0x%x]", p1, offset);
  } else {
    st->print(" " PTR_FORMAT, pc);
  }

  // function name - os::dll_address_to_function_name() may return confusing
  // names if pc is within jvm.dll or libjvm.so, because JVM only has
  // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
  // only for native libraries.
  if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
    found = os::dll_address_to_function_name(pc, buf, buflen, &offset);

    if (found) {
      st->print(" %s+0x%x", buf, offset);
    }
  }
}

// frame::print_on_error() is called by the fatal error handler. Notice that we
// may crash inside this function if the stack frame is corrupted. The fatal
// error handler can catch and handle the crash. Here we assume the frame is valid.
//
// First letter indicates type of the frame:
//    J: Java frame (compiled)
//    j: Java frame (interpreted)
//    V: VM frame (C/C++)
//    v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
//    C: C/C++ frame
//
// We don't need the detailed frame type as in frame::print_name(). "C"
// suggests the problem is in user lib; everything else is likely a VM bug.

void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
  if (_cb != NULL) {
    if (Interpreter::contains(pc())) {
      Method* m = this->interpreter_frame_method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("j %s", buf);
        st->print("+%d", this->interpreter_frame_bci());
      } else {
        st->print("j " PTR_FORMAT, pc());
      }
    } else if (StubRoutines::contains(pc())) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
      if (desc != NULL) {
        st->print("v ~StubRoutines::%s", desc->name());
      } else {
        st->print("v ~StubRoutines::" PTR_FORMAT, pc());
      }
    } else if (_cb->is_buffer_blob()) {
      st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
    } else if (_cb->is_nmethod()) {
      nmethod* nm = (nmethod*)_cb;
      Method* m = nm->method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
                  nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
                  ((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
                  buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
      } else {
        st->print("J " PTR_FORMAT, pc());
      }
    } else if (_cb->is_runtime_stub()) {
      st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
    } else if (_cb->is_deoptimization_stub()) {
      st->print("v ~DeoptimizationBlob");
    } else if (_cb->is_exception_stub()) {
      st->print("v ~ExceptionBlob");
    } else if (_cb->is_safepoint_stub()) {
      st->print("v ~SafepointBlob");
    } else {
      st->print("v blob " PTR_FORMAT, pc());
    }
  } else {
    print_C_frame(st, buf, buflen, pc());
  }
}


/*
  The interpreter_frame_expression_stack_at method in the case of SPARC needs the
  max_stack value of the method in order to compute the expression stack address.
  It uses the Method* in order to get the max_stack value but during GC this
  Method* value saved on the frame is changed by reverse_and_push and hence cannot
  be used. So we save the max_stack value in the FrameClosure object and pass it
  down to the interpreter_frame_expression_stack_at method
*/
class InterpreterFrameClosure : public OffsetClosure {
 private:
  frame* _fr;
  OopClosure* _f;
  int    _max_locals;
  int    _max_stack;

 public:
  InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
                          OopClosure* f) {
    _fr         = fr;
    _max_locals = max_locals;
    _max_stack  = max_stack;
    _f          = f;
  }

  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      _f->do_oop(addr);
    } else {
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        _f->do_oop(addr);
      }
    }
  }

  int max_locals()  { return _max_locals; }
  frame* fr()       { return _fr; }
};


class InterpretedArgumentOopFinder: public SignatureInfo {
 private:
  OopClosure* _f;        // Closure to invoke
  int    _offset;        // TOS-relative offset, decremented with each argument
  bool   _has_receiver;  // true if the callee has a receiver
  frame* _fr;

  void set(int size, BasicType type) {
    _offset -= size;
    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
  }

  void oop_offset_do() {
    oop* addr;
    addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
    _f->do_oop(addr);
  }

 public:
  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
    // compute size of arguments
    int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
    assert(!fr->is_interpreted_frame() ||
           args_size <= fr->interpreter_frame_expression_stack_size(),
           "args cannot be on stack anymore");
    // initialize InterpretedArgumentOopFinder
    _f      = f;
    _fr     = fr;
    _offset = args_size;
  }

  void oops_do() {
    if (_has_receiver) {
      --_offset;
      oop_offset_do();
    }
    iterate_parameters();
  }
};


// Entry frame has following form (n arguments)
//         +-----------+
//   sp -> |  last arg |
//         +-----------+
//         :    :::    :
//         +-----------+
// (sp+n)->|  first arg|
//         +-----------+



// visits and GC's all the arguments in entry frame
class EntryFrameOopFinder: public SignatureInfo {
 private:
  bool   _is_static;
  int    _offset;
  frame* _fr;
  OopClosure* _f;

  void set(int size, BasicType type) {
    assert (_offset >= 0, "illegal offset");
    if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
    _offset -= size;
  }

  void oop_at_offset_do(int offset) {
    assert (offset >= 0, "illegal offset");
    oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
    _f->do_oop(addr);
  }

 public:
  EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
    _f = NULL; // will be set later
    _fr = frame;
    _is_static = is_static;
    _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
  }

  void arguments_do(OopClosure* f) {
    _f = f;
    if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
    iterate_parameters();
  }

};

oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
  ArgumentSizeComputer asc(signature);
  int size = asc.size();
  return (oop *)interpreter_frame_tos_at(size);
}


void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
                                const RegisterMap* map, bool query_oop_map_cache) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(map != NULL, "map must be set");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(!Universe::heap()->is_in(m()),
          "must be valid oop");
  assert(m->is_method(), "checking frame value");
  assert((m->is_native() && bci == 0)  ||
         (!m->is_native() && bci >= 0 && bci < m->code_size()),
         "invalid bci value");

  // Handle the monitor elements in the activation
  for (
    BasicObjectLock* current = interpreter_frame_monitor_end();
    current < interpreter_frame_monitor_begin();
    current = next_monitor_in_interpreter_frame(current)
  ) {
#ifdef ASSERT
    interpreter_frame_verify_monitor(current);
#endif
    current->oops_do(f);
  }

  // process fixed part
  if (cld_f != NULL) {
    // The method pointer in the frame might be the only path to the method's
    // klass, and the klass needs to be kept alive while executing. The GCs
    // don't trace through method pointers, so typically in similar situations
    // the mirror or the class loader of the klass are installed as a GC root.
    // To minimize the overhead of doing that here, we ask the GC to pass down a
    // closure that knows how to keep klasses alive given a ClassLoaderData.
    cld_f->do_cld(m->method_holder()->class_loader_data());
  }

  if (m->is_native() PPC32_ONLY(&& m->is_static())) {
    f->do_oop(interpreter_frame_temp_oop_addr());
  }

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  Symbol* signature = NULL;
  bool has_receiver = false;

  // Process a callee's arguments if we are at a call site
  // (i.e., if we are at an invoke bytecode)
  // This is used sometimes for calling into the VM, not for another
  // interpreted or compiled frame.
  if (!m->is_native()) {
    Bytecode_invoke call = Bytecode_invoke_check(m, bci);
    if (call.is_valid()) {
      signature = call.signature();
      has_receiver = call.has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread);  // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        //       occurred during method resolution/execution. In all
        //       cases we empty the expression stack completely before
        //       handling the exception (the exception handling
        //       code in the interpreter calls a blocking runtime
        //       routine which can cause this code to be executed).
        //       (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

  // process locals & expression stack
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(bci, &mask);
  } else {
    OopMapCache::compute_one_oop_map(m, bci, &mask);
  }
  mask.iterate_oop(&blk);
}


void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
  finder.oops_do();
}

void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::oops_do(this, reg_map, f);

    // Preserve potential arguments for a callee. We handle this by dispatching
    // on the codeblob. For c2i, we do
    if (reg_map->include_argument_oops()) {
      _cb->preserve_callee_argument_oops(*this, reg_map, f);
    }
  }
  // In cases where perm gen is collected, GC will want to mark
  // oops referenced from nmethods active on thread stacks so as to
  // prevent them from being collected. However, this visit should be
  // restricted to certain phases of the collection only. The
  // closure decides how it wants nmethods to be traced.
  if (cf != NULL)
    cf->do_code_blob(_cb);
}

class CompiledArgumentOopFinder: public SignatureInfo {
 protected:
  OopClosure*  _f;
  int          _offset;        // the current offset, incremented with each argument
  bool         _has_receiver;  // true if the callee has a receiver
  bool         _has_appendix;  // true if the call has an appendix
  frame        _fr;
  RegisterMap* _reg_map;
  int          _arg_size;
  VMRegPair*   _regs;          // VMReg list of arguments

  void set(int size, BasicType type) {
    if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
    _offset += size;
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureInfo(signature) {

    // initialize CompiledArgumentOopFinder
    _f            = f;
    _offset       = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr           = fr;
    _reg_map      = (RegisterMap*)reg_map;
    _arg_size     = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);

    int arg_size;
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
    assert(arg_size == _arg_size, "wrong arg size");
  }

  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();
      _offset++;
    }
    iterate_parameters();
    if (_has_appendix) {
      handle_oop_offset();
      _offset++;
    }
  }
};

void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
  ResourceMark rm;
  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
  finder.oops_do();
}


// Get receiver out of caller's frame, i.e. find parameter 0 in caller's
// frame.  Consult ADLC for where parameter 0 is to be found.  Then
// check local reg_map for it being a callee-save register or argument
// register, both of which are saved in the local frame.  If not found
// there, it must be an in-stack argument of the caller.
// Note: caller.sp() points to callee-arguments
oop frame::retrieve_receiver(RegisterMap* reg_map) {
  frame caller = *this;

  // First consult the ADLC on where it puts parameter 0 for this signature.
  VMReg reg = SharedRuntime::name_for_receiver();
  oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
  if (oop_adr == NULL) {
    guarantee(oop_adr != NULL, "bad register save location");
    return NULL;
  }
  oop r = *oop_adr;
  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
  return r;
}


oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
  if(reg->is_reg()) {
    // If it is passed in a register, it got spilled in the stub frame.
    return (oop *)reg_map->location(reg);
  } else {
    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
  }
}

BasicLock* frame::get_native_monitor() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  return (BasicLock*) &sp()[byte_offset / wordSize];
}

oop frame::get_native_receiver() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_receiver_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  oop owner = ((oop*) sp())[byte_offset / wordSize];
  assert( Universe::heap()->is_in(owner), "bad receiver" );
  return owner;
}

void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
  assert(map != NULL, "map must be set");
  if (map->include_argument_oops()) {
    // must collect argument oops, as nobody else is doing it
    Thread *thread = Thread::current();
    methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
    EntryFrameOopFinder finder(this, m->signature(), m->is_static());
    finder.arguments_do(f);
  }
  // Traverse the Handle Block saved in the entry frame
  entry_frame_call_wrapper()->oops_do(f);
}


void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
  // simulate GC crash here to dump java thread in error report
  if (CrashGCForDumpingJavaThread) {
    char *t = NULL;
    *t = 'c';
  }
#endif
  if (is_interpreted_frame()) {
    oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
  } else if (is_entry_frame()) {
    oops_entry_do(f, map);
  } else if (CodeCache::contains(pc())) {
    oops_code_blob_do(f, cf, map);
#ifdef SHARK
  } else if (is_fake_stub_frame()) {
    // nothing to do
#endif // SHARK
  } else {
    ShouldNotReachHere();
  }
}

void frame::nmethods_do(CodeBlobClosure* cf) {
  if (_cb != NULL && _cb->is_nmethod()) {
    cf->do_code_blob(_cb);
  }
}


// call f() on the interpreted Method*s in the stack.
// Have to walk the entire code cache for the compiled frames Yuck.
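// (Only the interpreted case is handled here: the frame's Method* is passed to
// f(). For compiled frames the Method*s are found by walking the nmethods in
// the code cache, as noted above.)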
void frame::metadata_do(void f(Metadata*)) {
  if (_cb != NULL && Interpreter::contains(pc())) {
    Method* m = this->interpreter_frame_method();
    assert(m != NULL, "huh?");
    f(m);
  }
}

void frame::gc_prologue() {
  if (is_interpreted_frame()) {
    // set bcx to bci to become Method* position independent during GC
    interpreter_frame_set_bcx(interpreter_frame_bci());
  }
}


void frame::gc_epilogue() {
  if (is_interpreted_frame()) {
    // set bcx back to bcp for interpreter
    interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
  }
  // call processor specific epilog function
  pd_gc_epilog();
}


# ifdef ENABLE_ZAP_DEAD_LOCALS

void frame::CheckValueClosure::do_oop(oop* p) {
  if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
    warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckValueClosure frame::_check_value;


void frame::CheckOopClosure::do_oop(oop* p) {
  if (*p != NULL && !(*p)->is_oop()) {
    warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckOopClosure frame::_check_oop;

void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}


void frame::ZapDeadClosure::do_oop(oop* p) {
  if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
  *p = cast_to_oop<intptr_t>(0xbabebabe);
}
frame::ZapDeadClosure frame::_zap_dead;

void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
       if (is_entry_frame      ()) zap_dead_entry_locals      (thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame   ()) zap_dead_compiled_locals   (thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}


void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Method* m = interpreter_frame_method();
  int bci   = interpreter_frame_bci();

  int max_locals = m->is_native() ?
                     m->size_of_parameters() : m->max_locals();

  // process dynamic part
  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                    &_check_value);
  InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
                                    &_check_oop  );
  InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
                                    &_zap_dead   );

  // get frame map
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
}


void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {

  ResourceMark rm(thread);
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
  }
}


void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}


void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}

# endif // ENABLE_ZAP_DEAD_LOCALS

void frame::verify(const RegisterMap* map) {
  // for now make sure receiver type is correct
  if (is_interpreted_frame()) {
    Method* method = interpreter_frame_method();
    guarantee(method->is_method(), "method is wrong in frame::verify");
    if (!method->is_static()) {
      // fetch the receiver
      oop* p = (oop*) interpreter_frame_local_at(0);
      // make sure we have the right receiver type
    }
  }
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}


#ifdef ASSERT
bool frame::verify_return_pc(address x) {
  if (StubRoutines::returns_to_call_stub(x)) {
    return true;
  }
  if (CodeCache::contains(x)) {
    return true;
  }
  if (Interpreter::contains(x)) {
    return true;
  }
  return false;
}
#endif

#ifdef ASSERT
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // verify that the value is in the right part of the frame
  address low_mark  = (address) interpreter_frame_monitor_end();
  address high_mark = (address) interpreter_frame_monitor_begin();
  address current   = (address) value;

  const int monitor_size = frame::interpreter_frame_monitor_size();
  guarantee((high_mark - current) % monitor_size == 0, "Misaligned top of BasicObjectLock*");
  guarantee( high_mark > current                      , "Current BasicObjectLock* higher than high_mark");

  guarantee((current - low_mark) % monitor_size == 0, "Misaligned bottom of BasicObjectLock*");
  guarantee( current >= low_mark                     , "Current BasicObjectLock* below low_mark");
}
#endif

#ifndef PRODUCT
void frame::describe(FrameValues& values, int frame_no) {
  // boundaries: sp and the 'real' frame pointer
  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()

  // print frame info at the highest boundary
  intptr_t* info_address = MAX2(sp(), frame_pointer);

  if (info_address != frame_pointer) {
    // print frame_pointer explicitly if not marked by the frame info
    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
  }

  if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
    // Label values common to most frames
    values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
  }

  if (is_interpreted_frame()) {
    Method* m = interpreter_frame_method();
    int bci = interpreter_frame_bci();

    // Label the method and current bci
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
    values.describe(-1, info_address,
                    err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
    if (m->max_locals() > 0) {
      intptr_t* l0 = interpreter_frame_local_at(0);
      intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
      values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
      // Report each local and mark as owned by this frame
      for (int l = 0; l < m->max_locals(); l++) {
        intptr_t* l0 = interpreter_frame_local_at(l);
        values.describe(frame_no, l0, err_msg("local %d", l));
      }
    }

    // Compute the actual expression stack size
    InterpreterOopMap mask;
    OopMapCache::compute_one_oop_map(m, bci, &mask);
    intptr_t* tos = NULL;
    // Report each stack element and mark as owned by this frame
    for (int e = 0; e < mask.expression_stack_size(); e++) {
      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
                      err_msg("stack %d", e));
    }
    if (tos != NULL) {
      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
    }
    if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string(),
                                       (_deopt_state == is_deoptimized) ?
                                       " (deoptimized)" :
                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
                    2);
  } else if (is_native_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
  } else {
    // provide default info if not handled before
    char *info = (char *) "special frame";
    if ((_cb != NULL) &&
        (_cb->name() != NULL)) {
      info = (char *)_cb->name();
    }
    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
  }

  // platform dependent additional data
  describe_pd(values, frame_no);
}

#endif


//-----------------------------------------------------------------------------------
// StackFrameStream implementation

StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
  assert(thread->has_last_Java_frame(), "sanity check");
  _fr = thread->last_frame();
  _is_done = false;
}


#ifndef PRODUCT

void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
  FrameValue fv;
  fv.location = location;
  fv.owner = owner;
  fv.priority = priority;
  fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
  strcpy(fv.description, description);
  _values.append(fv);
}


#ifdef ASSERT
void FrameValues::validate() {
  _values.sort(compare);
  bool error = false;
  FrameValue prev;
  prev.owner = -1;
  for (int i = _values.length() - 1; i >= 0; i--) {
    FrameValue fv = _values.at(i);
    if (fv.owner == -1) continue;
    if (prev.owner == -1) {
      prev = fv;
      continue;
    }
    if (prev.location == fv.location) {
      if (fv.owner != prev.owner) {
        tty->print_cr("overlapping storage");
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
        error = true;
      }
    } else {
      prev = fv;
    }
  }
  assert(!error, "invalid layout");
}
#endif // ASSERT

void FrameValues::print(JavaThread* thread) {
  _values.sort(compare);

  // Sometimes values like the fp can be invalid values if the
  // register map wasn't updated during the walk.  Trim out values
  // that aren't actually in the stack of the thread.
  int min_index = 0;
  int max_index = _values.length() - 1;
  intptr_t* v0 = _values.at(min_index).location;
  intptr_t* v1 = _values.at(max_index).location;

  if (thread == Thread::current()) {
    while (!thread->is_in_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->is_in_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  } else {
    while (!thread->on_local_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->on_local_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  }
  intptr_t* min = MIN2(v0, v1);
  intptr_t* max = MAX2(v0, v1);
  intptr_t* cur = max;
  intptr_t* last = NULL;
  for (int i = max_index; i >= min_index; i--) {
    FrameValue fv = _values.at(i);
    while (cur > fv.location) {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
      cur--;
    }
    if (last == fv.location) {
      const char* spacer = " " LP64_ONLY(" ");
      tty->print_cr(" %s %s %s", spacer, spacer, fv.description);
    } else {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
      last = fv.location;
      cur--;
    }
  }
}

#endif // ndef PRODUCT