/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/decoder.hpp"

#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
  _thread = thread;
  _update_map = update_map;
  clear();
  debug_only(_update_for_id = NULL;)
#ifndef PRODUCT
  for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
#endif /* PRODUCT */
}

RegisterMap::RegisterMap(const RegisterMap* map) {
  assert(map != this, "bad initialization parameter");
  assert(map != NULL, "RegisterMap must be present");
  _thread = map->thread();
  _update_map = map->update_map();
  _include_argument_oops = map->include_argument_oops();
  debug_only(_update_for_id = map->_update_for_id;)
  pd_initialize_from(map);
  if (update_map()) {
    for(int i = 0; i < location_valid_size; i++) {
      LocationValidType bits = !update_map() ?
          0 : map->_location_valid[i];
      _location_valid[i] = bits;
      // for whichever bits are set, pull in the corresponding map->_location
      int j = i*location_valid_type_size;
      while (bits != 0) {
        if ((bits & 1) != 0) {
          assert(0 <= j && j < reg_count, "range check");
          _location[j] = map->_location[j];
        }
        bits >>= 1;
        j += 1;
      }
    }
  }
}

void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (_update_map) {
    for(int i = 0; i < location_valid_size; i++) {
      _location_valid[i] = 0;
    }
    pd_clear();
  } else {
    pd_initialize();
  }
}

#ifndef PRODUCT

void RegisterMap::print_on(outputStream* st) const {
  st->print_cr("Register map");
  for(int i = 0; i < reg_count; i++) {

    VMReg r = VMRegImpl::as_VMReg(i);
    intptr_t* src = (intptr_t*) location(r);
    if (src != NULL) {

      r->print_on(st);
      st->print(" [" INTPTR_FORMAT "] = ", src);
      if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
        st->print_cr("<misaligned>");
      } else {
        st->print_cr(INTPTR_FORMAT, *src);
      }
    }
  }
}

void RegisterMap::print() const {
  print_on(tty);
}

#endif
// This returns the pc that you'd see if you were in the debugger, not
// the idealized value in the frame object. This undoes the magic conversion
// that happens for deoptimized frames. In addition it makes the value the
// hardware would want to see in the native frame. The only user (at this point)
// is deoptimization. Likely no one else should ever use it.

address frame::raw_pc() const {
  if (is_deoptimized_frame()) {
    nmethod* nm = cb()->as_nmethod_or_null();
    if (nm->is_method_handle_return(pc()))
      return nm->deopt_mh_handler_begin() - pc_return_offset;
    else
      return nm->deopt_handler_begin() - pc_return_offset;
  } else {
    return (pc() - pc_return_offset);
  }
}

// Change the pc in a frame object. This does not change the actual pc in
// the actual frame. To do that use patch_pc.
//
void frame::set_pc(address newpc ) {
#ifdef ASSERT
  if (_cb != NULL && _cb->is_nmethod()) {
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
  }
#endif // ASSERT

  // Unsafe to use the is_deoptimized tester after changing pc
  _deopt_state = unknown;
  _pc = newpc;
  _cb = CodeCache::find_blob_unsafe(_pc);

}

// type testers
bool frame::is_ignored_frame() const {
  return false; // FIXME: some LambdaForm frames should be ignored
}
bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  return _deopt_state == is_deoptimized;
}

bool frame::is_native_frame() const {
  return (_cb != NULL &&
          _cb->is_nmethod() &&
          ((nmethod*)_cb)->is_native_method());
}

bool frame::is_java_frame() const {
  if (is_interpreted_frame()) return true;
  if (is_compiled_frame())    return true;
  return false;
}


bool frame::is_compiled_frame() const {
  if (_cb != NULL &&
      _cb->is_nmethod() &&
      ((nmethod*)_cb)->is_java_method()) {
    return true;
  }
  return false;
}


bool frame::is_runtime_frame() const {
  return (_cb != NULL && _cb->is_runtime_stub());
}

bool frame::is_safepoint_blob_frame() const {
  return (_cb != NULL && _cb->is_safepoint_stub());
}

// testers

bool frame::is_first_java_frame() const {
  RegisterMap map(JavaThread::current(), false); // No update
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
  return s.is_first_frame();
}


bool frame::entry_frame_is_first() const {
  return entry_frame_call_wrapper()->is_first_frame();
}

JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const {
  JavaCallWrapper** jcw = entry_frame_call_wrapper_addr();
  address addr = (address) jcw;

  // addr must be within the usable part of the stack
  if (thread->is_in_usable_stack(addr)) {
    return *jcw;
  }

  return NULL;
}

bool frame::should_be_deoptimized() const {
  if (_deopt_state == is_deoptimized ||
      !is_compiled_frame() ) return false;
  assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
  nmethod* nm = (nmethod *)_cb;
  if (TraceDependencies) {
    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
    nm->print_value_on(tty);
    tty->cr();
  }

  if( !nm->is_marked_for_deoptimization() )
    return false;

  // If at the return point, then the frame has already been popped, and
  // only the return needs to be executed. Don't deoptimize here.
  return !nm->is_at_poll_return(pc());
}

bool frame::can_be_deoptimized() const {
  if (!is_compiled_frame()) return false;
  nmethod* nm = (nmethod*)_cb;

  if( !nm->can_be_deoptimized() )
    return false;

  return !nm->is_at_poll_return(pc());
}

void frame::deoptimize(JavaThread* thread) {
  // Schedule deoptimization of an nmethod activation with this frame.
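  // The activation's pc is redirected to the nmethod's deoptimization handler
  // (the MethodHandle-specific handler for MH call sites), and set_original_pc()
  // records the original return pc for the deoptimization code to consult later.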
  assert(_cb != NULL && _cb->is_nmethod(), "must be");
  nmethod* nm = (nmethod*)_cb;

  // This is a fix for register window patching race
  if (NeedsDeoptSuspend && Thread::current() != thread) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "patching other threads for deopt may only occur at a safepoint");

    // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
    // we could see the frame again and ask for it to be deoptimized since
    // it might move for a long time. That is harmless and we just ignore it.
    if (id() == thread->must_deopt_id()) {
      assert(thread->is_deopt_suspend(), "lost suspension");
      return;
    }

    // We are at a safepoint so the target thread can only be
    // in 4 states:
    //  blocked - no problem
    //  blocked_trans - no problem (i.e. could have woken up from blocked
    //                              during a safepoint).
    //  native - register window pc patching race
    //  native_trans - momentary state
    //
    // We could just wait out a thread in native_trans to block.
    // Then we'd have all the issues that the safepoint code has as to
    // whether to spin or block. It isn't worth it. Just treat it like
    // native and be done with it.
    //
    // Examine the state of the thread at the start of safepoint since
    // threads that were in native at the start of the safepoint could
    // come to a halt during the safepoint, changing the current value
    // of the safepoint_state.
    JavaThreadState state = thread->safepoint_state()->orig_thread_state();
    if (state == _thread_in_native || state == _thread_in_native_trans) {
      // Since we are at a safepoint the target thread will stop itself
      // before it can return to java as long as we remain at the safepoint.
      // Therefore we can put an additional request for the thread to stop
      // no matter what (like a suspend). This will cause the thread
      // to notice it needs to do the deopt on its own once it leaves native.
      //
      // The only reason we must do this is because on machines with register
      // windows we have a race with patching the return address and the
      // window coming live as the thread returns to the Java code (but still
      // in native mode) and then blocks. It is only this topmost frame
      // that is at risk. So in truth we could add an additional check to
      // see if this frame is one that is at risk.
      RegisterMap map(thread, false);
      frame at_risk = thread->last_frame().sender(&map);
      if (id() == at_risk.id()) {
        thread->set_must_deopt_id(id());
        thread->set_deopt_suspend();
        return;
      }
    }
  } // NeedsDeoptSuspend


  // If the call site is a MethodHandle call site use the MH deopt
  // handler.
  address deopt = nm->is_method_handle_return(pc()) ?
                  nm->deopt_mh_handler_begin() :
                  nm->deopt_handler_begin();

  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());
  patch_pc(thread, deopt);

#ifdef ASSERT
  {
    RegisterMap map(thread, false);
    frame check = thread->last_frame();
    while (id() != check.id()) {
      check = check.sender(&map);
    }
    assert(check.is_deoptimized_frame(), "missed deopt");
  }
#endif // ASSERT
}

frame frame::java_sender() const {
  RegisterMap map(JavaThread::current(), false);
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ;
  guarantee(s.is_java_frame(), "tried to get caller of first java frame");
  return s;
}

frame frame::real_sender(RegisterMap* map) const {
  frame result = sender(map);
  while (result.is_runtime_frame() ||
         result.is_ignored_frame()) {
    result = result.sender(map);
  }
  return result;
}

// Note: called by profiler - NOT for current thread
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
  // If we don't recognize this frame, walk back up the stack until we do
  RegisterMap map(thread, false);
  frame first_java_frame = frame();

  // Find the first Java frame on the stack starting with input frame
  if (is_java_frame()) {
    // top frame is compiled frame or deoptimized frame
    first_java_frame = *this;
  } else if (safe_for_sender(thread)) {
    for (frame sender_frame = sender(&map);
         sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
         sender_frame = sender_frame.sender(&map)) {
      if (sender_frame.is_java_frame()) {
        first_java_frame = sender_frame;
        break;
      }
    }
  }
  return first_java_frame;
}

// Interpreter frames


void frame::interpreter_frame_set_locals(intptr_t* locs) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  *interpreter_frame_locals_addr() = locs;
}

Method* frame::interpreter_frame_method() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* m = *interpreter_frame_method_addr();
  assert(m->is_method(), "not a Method*");
  return m;
}

void frame::interpreter_frame_set_method(Method* method) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  *interpreter_frame_method_addr() = method;
}

void frame::interpreter_frame_set_bcx(intptr_t bcx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  if (ProfileInterpreter) {
    bool formerly_bci = is_bci(interpreter_frame_bcx());
    bool is_now_bci = is_bci(bcx);
    *interpreter_frame_bcx_addr() = bcx;

    intptr_t mdx = interpreter_frame_mdx();

    if (mdx != 0) {
      if (formerly_bci) {
        if (!is_now_bci) {
          // The bcx was just converted from bci to bcp.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
          address mdp = mdo->di_to_dp(mdi);
          interpreter_frame_set_mdx((intptr_t)mdp);
        }
      } else {
        if (is_now_bci) {
          // The bcx was just converted from bcp to bci.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdo->dp_to_di((address)mdx);
          interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
        }
      }
    }
  } else {
    *interpreter_frame_bcx_addr() = bcx;
  }
}

jint frame::interpreter_frame_bci() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
}

void frame::interpreter_frame_set_bci(jint bci) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
  interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
}

address frame::interpreter_frame_bcp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
}

void frame::interpreter_frame_set_bcp(address bcp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
  interpreter_frame_set_bcx((intptr_t)bcp);
}

void frame::interpreter_frame_set_mdx(intptr_t mdx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(ProfileInterpreter, "must be profiling interpreter");
  *interpreter_frame_mdx_addr() = mdx;
}

address frame::interpreter_frame_mdp() const {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  intptr_t mdx = interpreter_frame_mdx();

  assert(!is_bci(bcx), "should not access mdp during GC");
  return (address)mdx;
}

void frame::interpreter_frame_set_mdp(address mdp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  if (mdp == NULL) {
    // Always allow the mdp to be cleared.
    interpreter_frame_set_mdx((intptr_t)mdp);
  }
  intptr_t bcx = interpreter_frame_bcx();
  assert(!is_bci(bcx), "should not set mdp during GC");
  interpreter_frame_set_mdx((intptr_t)mdp);
}

BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
  interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
  return next;
}

BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
//   // This verification needs to be checked before being enabled
//   interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
  return previous;
}

// Interpreter locals and expression stack locations.
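//
// The accessors below locate individual slots: locals are reached through
// *interpreter_frame_locals_addr() using Interpreter::local_offset_in_bytes(index),
// while expression stack slots are reached from interpreter_frame_expression_stack()
// in the platform-dependent direction given by
// interpreter_frame_expression_stack_direction().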

intptr_t* frame::interpreter_frame_local_at(int index) const {
  const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
  return &((*interpreter_frame_locals_addr())[n]);
}

intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
  const int i = offset * interpreter_frame_expression_stack_direction();
  const int n = i * Interpreter::stackElementWords;
  return &(interpreter_frame_expression_stack()[n]);
}

jint frame::interpreter_frame_expression_stack_size() const {
  // Number of elements on the interpreter expression stack
  // Callers should span by stackElementWords
  int element_size = Interpreter::stackElementWords;
  size_t stack_size = 0;
  if (frame::interpreter_frame_expression_stack_direction() < 0) {
    stack_size = (interpreter_frame_expression_stack() -
                  interpreter_frame_tos_address() + 1)/element_size;
  } else {
    stack_size = (interpreter_frame_tos_address() -
                  interpreter_frame_expression_stack() + 1)/element_size;
  }
  assert( stack_size <= (size_t)max_jint, "stack size too big");
  return ((jint)stack_size);
}


// (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)

const char* frame::print_name() const {
  if (is_native_frame())      return "Native";
  if (is_interpreted_frame()) return "Interpreted";
  if (is_compiled_frame()) {
    if (is_deoptimized_frame()) return "Deoptimized";
    return "Compiled";
  }
  if (sp() == NULL)           return "Empty";
  return "C";
}

void frame::print_value_on(outputStream* st, JavaThread *thread) const {
  NOT_PRODUCT(address begin = pc()-40;)
  NOT_PRODUCT(address end = NULL;)

  st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
  if (sp() != NULL)
    st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());

  if (StubRoutines::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
    st->print("~Stub::%s", desc->name());
    NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
  } else if (Interpreter::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
    if (desc != NULL) {
      st->print("~");
      desc->print_on(st);
      NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
    } else {
      st->print("~interpreter");
    }
  }
  st->print_cr(")");

  if (_cb != NULL) {
    st->print(" ");
    _cb->print_value_on(st);
    st->cr();
#ifndef PRODUCT
    if (end == NULL) {
      begin = _cb->code_begin();
      end = _cb->code_end();
    }
#endif
  }
  NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
}


void frame::print_on(outputStream* st) const {
  print_value_on(st,NULL);
  if (is_interpreted_frame()) {
    interpreter_frame_print_on(st);
  }
}


void frame::interpreter_frame_print_on(outputStream* st) const {
#ifndef PRODUCT
  assert(is_interpreted_frame(), "Not an interpreted frame");
  jint i;
  for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
    intptr_t x = *interpreter_frame_local_at(i);
    st->print(" - local [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
    intptr_t x =
        *interpreter_frame_expression_stack_at(i);
    st->print(" - stack [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  // locks for synchronization
  for (BasicObjectLock* current = interpreter_frame_monitor_end();
       current < interpreter_frame_monitor_begin();
       current = next_monitor_in_interpreter_frame(current)) {
    st->print(" - obj [");
    current->obj()->print_value_on(st);
    st->print_cr("]");
    st->print(" - lock [");
    current->lock()->print_on(st);
    st->print_cr("]");
  }
  // monitor
  st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
  // bcp
  st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp());
  st->fill_to(23);
  st->print_cr("; @%d", interpreter_frame_bci());
  // locals
  st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
  // method
  st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
  st->fill_to(23);
  st->print("; ");
  interpreter_frame_method()->print_name(st);
  st->cr();
#endif
}

// Print whether the frame is in the VM or the OS, indicating a HotSpot problem.
// Otherwise, it's likely a bug in the native library that the Java code calls,
// hopefully indicating where to submit bugs.
void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
  // C/C++ frame
  bool in_vm = os::address_is_in_vm(pc);
  st->print(in_vm ? "V" : "C");

  int offset;
  bool found;

  // libname
  found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    int len = (int)strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    st->print(" [%s+0x%x]", p1, offset);
  } else {
    st->print(" " PTR_FORMAT, pc);
  }

  // function name - os::dll_address_to_function_name() may return confusing
  // names if pc is within jvm.dll or libjvm.so, because the JVM only has
  // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
  // only for native libraries.
  if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
    found = os::dll_address_to_function_name(pc, buf, buflen, &offset);

    if (found) {
      st->print(" %s+0x%x", buf, offset);
    }
  }
}

// frame::print_on_error() is called by the fatal error handler. Notice that we may
// crash inside this function if the stack frame is corrupted. The fatal error
// handler can catch and handle the crash. Here we assume the frame is valid.
//
// First letter indicates type of the frame:
//    J: Java frame (compiled)
//    j: Java frame (interpreted)
//    V: VM frame (C/C++)
//    v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
//    C: C/C++ frame
//
// We don't need a frame type as detailed as that in frame::print_name(). "C"
// suggests the problem is in user lib; everything else is likely a VM bug.
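//
// Purely illustrative examples of the lines produced below (ids, names and
// addresses are made up):
//   J 2758 C2 java.lang.String.hashCode()I (55 bytes) @ 0x00007f... [0x00007f...+0x6c]
//   j java.lang.Thread.run()V+11
//   v ~StubRoutines::call_stub
//   C [libzip.so+0x12ab3]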

void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
  if (_cb != NULL) {
    if (Interpreter::contains(pc())) {
      Method* m = this->interpreter_frame_method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("j %s", buf);
        st->print("+%d", this->interpreter_frame_bci());
      } else {
        st->print("j " PTR_FORMAT, pc());
      }
    } else if (StubRoutines::contains(pc())) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
      if (desc != NULL) {
        st->print("v ~StubRoutines::%s", desc->name());
      } else {
        st->print("v ~StubRoutines::" PTR_FORMAT, pc());
      }
    } else if (_cb->is_buffer_blob()) {
      st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
    } else if (_cb->is_nmethod()) {
      nmethod* nm = (nmethod*)_cb;
      Method* m = nm->method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
                  nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
                  ((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
                  buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
      } else {
        st->print("J " PTR_FORMAT, pc());
      }
    } else if (_cb->is_runtime_stub()) {
      st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
    } else if (_cb->is_deoptimization_stub()) {
      st->print("v ~DeoptimizationBlob");
    } else if (_cb->is_exception_stub()) {
      st->print("v ~ExceptionBlob");
    } else if (_cb->is_safepoint_stub()) {
      st->print("v ~SafepointBlob");
    } else {
      st->print("v blob " PTR_FORMAT, pc());
    }
  } else {
    print_C_frame(st, buf, buflen, pc());
  }
}


/*
  The interpreter_frame_expression_stack_at method in the case of SPARC needs the
  max_stack value of the method in order to compute the expression stack address.
  It uses the Method* in order to get the max_stack value but during GC this
  Method* value saved on the frame is changed by reverse_and_push and hence cannot
  be used. So we save the max_stack value in the FrameClosure object and pass it
  down to the interpreter_frame_expression_stack_at method
*/
class InterpreterFrameClosure : public OffsetClosure {
 private:
  frame* _fr;
  OopClosure* _f;
  int _max_locals;
  int _max_stack;

 public:
  InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
                          OopClosure* f) {
    _fr = fr;
    _max_locals = max_locals;
    _max_stack = max_stack;
    _f = f;
  }

  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      _f->do_oop(addr);
    } else {
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        _f->do_oop(addr);
      }
    }
  }

  int max_locals()  { return _max_locals; }
  frame* fr()       { return _fr; }
};


class InterpretedArgumentOopFinder: public SignatureInfo {
 private:
  OopClosure* _f;       // Closure to invoke
  int _offset;          // TOS-relative offset, decremented with each argument
  bool _has_receiver;   // true if the callee has a receiver
  frame* _fr;

  void set(int size, BasicType type) {
    _offset -= size;
    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
  }

  void oop_offset_do() {
    oop* addr;
    addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
    _f->do_oop(addr);
  }

 public:
  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
    // compute size of arguments
    int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
    assert(!fr->is_interpreted_frame() ||
           args_size <= fr->interpreter_frame_expression_stack_size(),
           "args cannot be on stack anymore");
    // initialize InterpretedArgumentOopFinder
    _f = f;
    _fr = fr;
    _offset = args_size;
  }

  void oops_do() {
    if (_has_receiver) {
      --_offset;
      oop_offset_do();
    }
    iterate_parameters();
  }
};


// Entry frame has the following form (n arguments)
//         +-----------+
//   sp -> |  last arg |
//         +-----------+
//         :    :::    :
//         +-----------+
// (sp+n)->|  first arg|
//         +-----------+



// visits and GC's all the arguments in the entry frame
class EntryFrameOopFinder: public SignatureInfo {
 private:
  bool _is_static;
  int _offset;
  frame* _fr;
  OopClosure* _f;

  void set(int size, BasicType type) {
    assert (_offset >= 0, "illegal offset");
    if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
    _offset -= size;
  }

  void oop_at_offset_do(int offset) {
    assert (offset >= 0, "illegal offset");
    oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
    _f->do_oop(addr);
  }

 public:
  EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
    _f = NULL; // will be set later
    _fr = frame;
    _is_static = is_static;
    _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
  }

  void arguments_do(OopClosure* f) {
    _f = f;
    if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
    iterate_parameters();
  }

};

oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
  ArgumentSizeComputer asc(signature);
  int size = asc.size();
  return (oop *)interpreter_frame_tos_at(size);
}


void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
                                const RegisterMap* map, bool query_oop_map_cache) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(map != NULL, "map must be set");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(!Universe::heap()->is_in(m()),
         "must be valid oop");
  assert(m->is_method(), "checking frame value");
  assert((m->is_native() && bci == 0) ||
         (!m->is_native() && bci >= 0 && bci < m->code_size()),
         "invalid bci value");

  // Handle the monitor elements in the activation
  for (
    BasicObjectLock* current = interpreter_frame_monitor_end();
    current < interpreter_frame_monitor_begin();
    current = next_monitor_in_interpreter_frame(current)
  ) {
#ifdef ASSERT
    interpreter_frame_verify_monitor(current);
#endif
    current->oops_do(f);
  }

  // process fixed part
  if (cld_f != NULL) {
    // The method pointer in the frame might be the only path to the method's
    // klass, and the klass needs to be kept alive while executing. The GCs
    // don't trace through method pointers, so typically in similar situations
    // the mirror or the class loader of the klass are installed as a GC root.
    // To minimize the overhead of doing that here, we ask the GC to pass down a
    // closure that knows how to keep klasses alive given a ClassLoaderData.
    cld_f->do_cld(m->method_holder()->class_loader_data());
  }

  if (m->is_native() PPC32_ONLY(&& m->is_static())) {
    f->do_oop(interpreter_frame_temp_oop_addr());
  }

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  Symbol* signature = NULL;
  bool has_receiver = false;

  // Process a callee's arguments if we are at a call site
  // (i.e., if we are at an invoke bytecode)
  // This is used sometimes for calling into the VM, not for another
  // interpreted or compiled frame.
  if (!m->is_native()) {
    Bytecode_invoke call = Bytecode_invoke_check(m, bci);
    if (call.is_valid()) {
      signature = call.signature();
      has_receiver = call.has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread);  // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        //       occurred during method resolution/execution. In all
        //       cases we empty the expression stack completely be-
        //       fore handling the exception (the exception handling
        //       code in the interpreter calls a blocking runtime
        //       routine which can cause this code to be executed).
        //       (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

  // process locals & expression stack
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(bci, &mask);
  } else {
    OopMapCache::compute_one_oop_map(m, bci, &mask);
  }
  mask.iterate_oop(&blk);
}


void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
  finder.oops_do();
}

void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::oops_do(this, reg_map, f);

    // Preserve potential arguments for a callee. We handle this by dispatching
    // on the codeblob. For c2i, we do
    if (reg_map->include_argument_oops()) {
      _cb->preserve_callee_argument_oops(*this, reg_map, f);
    }
  }
  // In cases where perm gen is collected, GC will want to mark
  // oops referenced from nmethods active on thread stacks so as to
  // prevent them from being collected. However, this visit should be
  // restricted to certain phases of the collection only. The
  // closure decides how it wants nmethods to be traced.
  if (cf != NULL)
    cf->do_code_blob(_cb);
}

class CompiledArgumentOopFinder: public SignatureInfo {
 protected:
  OopClosure* _f;
  int _offset;          // the current offset, incremented with each argument
  bool _has_receiver;   // true if the callee has a receiver
  bool _has_appendix;   // true if the call has an appendix
  frame _fr;
  RegisterMap* _reg_map;
  int _arg_size;
  VMRegPair* _regs;     // VMReg list of arguments

  void set(int size, BasicType type) {
    if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
    _offset += size;
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureInfo(signature) {

    // initialize CompiledArgumentOopFinder
    _f = f;
    _offset = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr = fr;
    _reg_map = (RegisterMap*)reg_map;
    _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);

    int arg_size;
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
    assert(arg_size == _arg_size, "wrong arg size");
  }

  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();
      _offset++;
    }
    iterate_parameters();
    if (_has_appendix) {
      handle_oop_offset();
      _offset++;
    }
  }
};

void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
  ResourceMark rm;
  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
  finder.oops_do();
}


// Get receiver out of the caller's frame, i.e. find parameter 0 in the caller's
// frame. Consult ADLC for where parameter 0 is to be found. Then
// check local reg_map for it being a callee-save register or argument
// register, both of which are saved in the local frame. If not found
// there, it must be an in-stack argument of the caller.
// Note: caller.sp() points to callee-arguments
oop frame::retrieve_receiver(RegisterMap* reg_map) {
  frame caller = *this;

  // First consult the ADLC on where it puts parameter 0 for this signature.
  VMReg reg = SharedRuntime::name_for_receiver();
  oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
  if (oop_adr == NULL) {
    guarantee(oop_adr != NULL, "bad register save location");
    return NULL;
  }
  oop r = *oop_adr;
  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
  return r;
}


oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
  if(reg->is_reg()) {
    // If it is passed in a register, it got spilled in the stub frame.
    return (oop *)reg_map->location(reg);
  } else {
    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
  }
}

BasicLock* frame::get_native_monitor() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  return (BasicLock*) &sp()[byte_offset / wordSize];
}

oop frame::get_native_receiver() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_receiver_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  oop owner = ((oop*) sp())[byte_offset / wordSize];
  assert( Universe::heap()->is_in(owner), "bad receiver" );
  return owner;
}

void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
  assert(map != NULL, "map must be set");
  if (map->include_argument_oops()) {
    // must collect argument oops, as nobody else is doing it
    Thread *thread = Thread::current();
    methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
    EntryFrameOopFinder finder(this, m->signature(), m->is_static());
    finder.arguments_do(f);
  }
  // Traverse the Handle Block saved in the entry frame
  entry_frame_call_wrapper()->oops_do(f);
}


void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
  // simulate GC crash here to dump java thread in error report
  if (CrashGCForDumpingJavaThread) {
    char *t = NULL;
    *t = 'c';
  }
#endif
  if (is_interpreted_frame()) {
    oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
  } else if (is_entry_frame()) {
    oops_entry_do(f, map);
  } else if (CodeCache::contains(pc())) {
    oops_code_blob_do(f, cf, map);
#ifdef SHARK
  } else if (is_fake_stub_frame()) {
    // nothing to do
#endif // SHARK
  } else {
    ShouldNotReachHere();
  }
}

void frame::nmethods_do(CodeBlobClosure* cf) {
  if (_cb != NULL && _cb->is_nmethod()) {
    cf->do_code_blob(_cb);
  }
}


// call f() on the interpreted Method*s in the stack.
// Have to walk the entire code cache for the compiled frames. Yuck.
void frame::metadata_do(void f(Metadata*)) {
  if (_cb != NULL && Interpreter::contains(pc())) {
    Method* m = this->interpreter_frame_method();
    assert(m != NULL, "huh?");
    f(m);
  }
}

void frame::gc_prologue() {
  if (is_interpreted_frame()) {
    // set bcx to bci to become Method* position independent during GC
    interpreter_frame_set_bcx(interpreter_frame_bci());
  }
}


void frame::gc_epilogue() {
  if (is_interpreted_frame()) {
    // set bcx back to bcp for interpreter
    interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
  }
  // call processor specific epilog function
  pd_gc_epilog();
}


# ifdef ENABLE_ZAP_DEAD_LOCALS

void frame::CheckValueClosure::do_oop(oop* p) {
  if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
    warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckValueClosure frame::_check_value;


void frame::CheckOopClosure::do_oop(oop* p) {
  if (*p != NULL && !(*p)->is_oop()) {
    warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckOopClosure frame::_check_oop;

void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}


void frame::ZapDeadClosure::do_oop(oop* p) {
  if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
  *p = cast_to_oop<intptr_t>(0xbabebabe);
}
frame::ZapDeadClosure frame::_zap_dead;

void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
  if (is_entry_frame())            zap_dead_entry_locals(thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame())    zap_dead_compiled_locals(thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}


void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();

  int max_locals = m->is_native() ?
                     m->size_of_parameters() : m->max_locals();

  // process dynamic part
  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                    &_check_value);
  InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
                                  &_check_oop);
  InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
                                   &_zap_dead);

  // get frame map
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
}


void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {

  ResourceMark rm(thread);
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
  }
}


void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}


void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}

# endif // ENABLE_ZAP_DEAD_LOCALS

void frame::verify(const RegisterMap* map) {
  // for now make sure receiver type is correct
  if (is_interpreted_frame()) {
    Method* method = interpreter_frame_method();
    guarantee(method->is_method(), "method is wrong in frame::verify");
    if (!method->is_static()) {
      // fetch the receiver
      oop* p = (oop*) interpreter_frame_local_at(0);
      // make sure we have the right receiver type
    }
  }
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}


#ifdef ASSERT
bool frame::verify_return_pc(address x) {
  if (StubRoutines::returns_to_call_stub(x)) {
    return true;
  }
  if (CodeCache::contains(x)) {
    return true;
  }
  if (Interpreter::contains(x)) {
    return true;
  }
  return false;
}
#endif

#ifdef ASSERT
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // verify that the value is in the right part of the frame
  address low_mark = (address) interpreter_frame_monitor_end();
  address high_mark = (address) interpreter_frame_monitor_begin();
  address current = (address) value;

  const int monitor_size = frame::interpreter_frame_monitor_size();
  guarantee((high_mark - current) % monitor_size == 0, "Misaligned top of BasicObjectLock*");
  guarantee(high_mark > current, "Current BasicObjectLock* higher than high_mark");

  guarantee((current - low_mark) % monitor_size == 0, "Misaligned bottom of BasicObjectLock*");
  guarantee(current >= low_mark, "Current BasicObjectLock* below low_mark");
}
#endif

#ifndef PRODUCT
void frame::describe(FrameValues& values, int frame_no) {
  // boundaries: sp and the 'real' frame pointer
  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()

  // print frame info at the highest boundary
  intptr_t* info_address = MAX2(sp(), frame_pointer);

  if (info_address != frame_pointer) {
    // print frame_pointer explicitly if not marked by the frame info
    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
  }

  if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
    // Label values common to most frames
    values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
  }

  if (is_interpreted_frame()) {
    Method* m = interpreter_frame_method();
    int bci = interpreter_frame_bci();

    // Label the method and current bci
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
    values.describe(-1, info_address,
                    err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
    if (m->max_locals() > 0) {
      intptr_t* l0 = interpreter_frame_local_at(0);
      intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
      values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
      // Report each local and mark as owned by this frame
      for (int l = 0; l < m->max_locals(); l++) {
        intptr_t* l0 = interpreter_frame_local_at(l);
        values.describe(frame_no, l0, err_msg("local %d", l));
      }
    }

    // Compute the actual expression stack size
    InterpreterOopMap mask;
    OopMapCache::compute_one_oop_map(m, bci, &mask);
    intptr_t* tos = NULL;
    // Report each stack element and mark as owned by this frame
    for (int e = 0; e < mask.expression_stack_size(); e++) {
      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
                      err_msg("stack %d", e));
    }
    if (tos != NULL) {
      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
    }
    if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string(),
                                       (_deopt_state == is_deoptimized) ?
                                       " (deoptimized)" :
                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
                    2);
  } else if (is_native_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
  } else {
    // provide default info if not handled before
    char *info = (char *) "special frame";
    if ((_cb != NULL) &&
        (_cb->name() != NULL)) {
      info = (char *)_cb->name();
    }
    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
  }

  // platform dependent additional data
  describe_pd(values, frame_no);
}

#endif


//-----------------------------------------------------------------------------------
// StackFrameStream implementation

StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
  assert(thread->has_last_Java_frame(), "sanity check");
  _fr = thread->last_frame();
  _is_done = false;
}


#ifndef PRODUCT

void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
  FrameValue fv;
  fv.location = location;
  fv.owner = owner;
  fv.priority = priority;
  fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
  strcpy(fv.description, description);
  _values.append(fv);
}


#ifdef ASSERT
void FrameValues::validate() {
  _values.sort(compare);
  bool error = false;
  FrameValue prev;
  prev.owner = -1;
  for (int i = _values.length() - 1; i >= 0; i--) {
    FrameValue fv = _values.at(i);
    if (fv.owner == -1) continue;
    if (prev.owner == -1) {
      prev = fv;
      continue;
    }
    if (prev.location == fv.location) {
      if (fv.owner != prev.owner) {
        tty->print_cr("overlapping storage");
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
        error = true;
      }
    } else {
      prev = fv;
    }
  }
  assert(!error, "invalid layout");
}
#endif // ASSERT

void FrameValues::print(JavaThread* thread) {
  _values.sort(compare);

  // Sometimes values like the fp can be invalid values if the
  // register map wasn't updated during the walk. Trim out values
  // that aren't actually in the stack of the thread.
  int min_index = 0;
  int max_index = _values.length() - 1;
  intptr_t* v0 = _values.at(min_index).location;
  intptr_t* v1 = _values.at(max_index).location;

  if (thread == Thread::current()) {
    while (!thread->is_in_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->is_in_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  } else {
    while (!thread->on_local_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->on_local_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  }
  intptr_t* min = MIN2(v0, v1);
  intptr_t* max = MAX2(v0, v1);
  intptr_t* cur = max;
  intptr_t* last = NULL;
  for (int i = max_index; i >= min_index; i--) {
    FrameValue fv = _values.at(i);
    while (cur > fv.location) {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
      cur--;
    }
    if (last == fv.location) {
      const char* spacer = " " LP64_ONLY(" ");
      tty->print_cr(" %s %s %s", spacer, spacer, fv.description);
    } else {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
      last = fv.location;
      cur--;
    }
  }
}

#endif // ndef PRODUCT