// NOTE(review): SOURCE is a flattened side-by-side diff render ("old | new") of
// HotSpot frame code; the integers embedded in the text are the renderer's own
// line numbers, not program tokens. Code text is preserved byte-for-byte below.
// This segment (left/old column, rendered lines 296-355) contains:
//  - frame::is_interpreted_frame: true iff pc() lies inside interpreter code.
//  - frame::frame_size: sender.sp() - sp(), in words.
//  - frame::entry_frame_argument_at: entry-frame args indexed off unextended_sp().
//  - #ifdef CC_INTERP variants of interpreter_frame_sender_sp and the monitor
//    begin/end accessors (via get_interpreterState()), and the #else template-
//    interpreter variants reading frame slots by offset. The segment is cut at
//    the opening of the template interpreter_frame_monitor_end (rendered 355).
296 } 297 } 298 299 bool frame::is_interpreted_frame() const { 300 return Interpreter::contains(pc()); 301 } 302 303 int frame::frame_size(RegisterMap* map) const { 304 frame sender = this->sender(map); 305 return sender.sp() - sp(); 306 } 307 308 intptr_t* frame::entry_frame_argument_at(int offset) const { 309 // convert offset to index to deal with tsi 310 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); 311 // Entry frame's arguments are always in relation to unextended_sp() 312 return &unextended_sp()[index]; 313 } 314 315 // sender_sp 316 #ifdef CC_INTERP 317 intptr_t* frame::interpreter_frame_sender_sp() const { 318 assert(is_interpreted_frame(), "interpreted frame expected"); 319 // QQQ why does this specialize method exist if frame::sender_sp() does same thing? 320 // seems odd and if we always know interpreted vs. non then sender_sp() is really 321 // doing too much work. 322 return get_interpreterState()->sender_sp(); 323 } 324 325 // monitor elements 326 327 BasicObjectLock* frame::interpreter_frame_monitor_begin() const { 328 return get_interpreterState()->monitor_base(); 329 } 330 331 BasicObjectLock* frame::interpreter_frame_monitor_end() const { 332 return (BasicObjectLock*) get_interpreterState()->stack_base(); 333 } 334 335 #else // CC_INTERP 336 337 intptr_t* frame::interpreter_frame_sender_sp() const { 338 assert(is_interpreted_frame(), "interpreted frame expected"); 339 return (intptr_t*) at(interpreter_frame_sender_sp_offset); 340 } 341 342 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) { 343 assert(is_interpreted_frame(), "interpreted frame expected"); 344 ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp); 345 } 346 347 348 // monitor elements 349 350 BasicObjectLock* frame::interpreter_frame_monitor_begin() const { 351 return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset); 352 } 353 354 BasicObjectLock* frame::interpreter_frame_monitor_end() const { 355 
// NOTE(review): old/left diff column, rendered lines 355-390 then an elided jump
// to 510-519. Contains:
//  - completion of interpreter_frame_monitor_end, asserting the stored monitor-top
//    pointer lies strictly between sp() and fp();
//  - interpreter_frame_set_monitor_end / interpreter_frame_set_last_sp (the latter
//    used by template-interpreter deoptimization), then #endif CC_INTERP;
//  - frame::sender_for_entry_frame: rebuilds the caller frame from the entry
//    frame's JavaFrameAnchor (last_Java_sp/fp, and pc when recorded), after
//    clearing the RegisterMap.
// The 390->510 jump elides rendered lines (the verify_deopt_original_pc section
// header is visible but its body is not, and the fragment from 510 onward is the
// interior of a sender-dispatch function whose signature is not visible here --
// presumably frame::sender; TODO confirm against the full file).
BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset); 356 // make sure the pointer points inside the frame 357 assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer"); 358 assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer"); 359 return result; 360 } 361 362 void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) { 363 *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value; 364 } 365 366 // Used by template based interpreter deoptimization 367 void frame::interpreter_frame_set_last_sp(intptr_t* sp) { 368 *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp; 369 } 370 #endif // CC_INTERP 371 372 frame frame::sender_for_entry_frame(RegisterMap* map) const { 373 assert(map != NULL, "map must be set"); 374 // Java frame called from C; skip all C frames and return top C 375 // frame of that chunk as the sender 376 JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); 377 assert(!entry_frame_is_first(), "next Java fp must be non zero"); 378 assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); 379 map->clear(); 380 assert(map->include_argument_oops(), "should be set by clear"); 381 if (jfa->last_Java_pc() != NULL ) { 382 frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); 383 return fr; 384 } 385 frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); 386 return fr; 387 } 388 389 //------------------------------------------------------------------------------ 390 // frame::verify_deopt_original_pc 510 map->set_include_argument_oops(false); 511 512 if (is_entry_frame()) 513 return sender_for_entry_frame(map); 514 if (is_interpreted_frame()) 515 return sender_for_interpreter_frame(map); 516 assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); 517 518 // This test looks odd: why is it not is_compiled_frame() ? That's 519 // because stubs also have OOP maps.
// NOTE(review): old/left diff column, rendered lines 520-593 with an internal
// elision 552->566. Contains:
//  - tail of the sender dispatch: a non-NULL _cb routes to
//    sender_for_compiled_frame (stubs have OOP maps too, hence no
//    is_compiled_frame() test); otherwise a native-marshaling frame is built
//    from sender_sp()/link()/sender_pc();
//  - is_interpreted_frame_valid: fp/sp non-zero and word-aligned, initial-sp
//    offset and fp>sp layout checks, then (after the elided method-validation
//    span -- the Method* local `m` used below is declared in that gap) bcp
//    validation via validate_bci_from_bcp, ConstantPoolCache* metaspace check,
//    and a locals-pointer range check against stack_base()/fp(). The CC_INTERP
//    build compiles only `return true;` (empty #ifdef branch).
//  - head of interpreter_frame_result with its CC_INTERP interpreterState
//    preamble (needed for JVMTI per the comment), cut mid-comment.
520 if (_cb != NULL) { 521 return sender_for_compiled_frame(map); 522 } 523 524 // Must be native-compiled frame, i.e. the marshaling code for native 525 // methods that exists in the core system. 526 return frame(sender_sp(), link(), sender_pc()); 527 } 528 529 bool frame::is_interpreted_frame_valid(JavaThread* thread) const { 530 // QQQ 531 #ifdef CC_INTERP 532 #else 533 assert(is_interpreted_frame(), "Not an interpreted frame"); 534 // These are reasonable sanity checks 535 if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) { 536 return false; 537 } 538 if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) { 539 return false; 540 } 541 if (fp() + interpreter_frame_initial_sp_offset < sp()) { 542 return false; 543 } 544 // These are hacks to keep us out of trouble. 545 // The problem with these is that they mask other problems 546 if (fp() <= sp()) { // this attempts to deal with unsigned comparison above 547 return false; 548 } 549 550 // do some validation of frame elements 551 552 // first the method 566 } 567 568 // validate bci/bcx 569 570 address bcp = interpreter_frame_bcp(); 571 if (m->validate_bci_from_bcp(bcp) < 0) { 572 return false; 573 } 574 575 // validate constantPoolCache* 576 ConstantPoolCache* cp = *interpreter_frame_cache_addr(); 577 if (cp == NULL || !cp->is_metaspace_object()) return false; 578 579 // validate locals 580 581 address locals = (address) *interpreter_frame_locals_addr(); 582 583 if (locals > thread->stack_base() || locals < (address) fp()) return false; 584 585 // We'd have to be pretty unlucky to be mislead at this point 586 587 #endif // CC_INTERP 588 return true; 589 } 590 591 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) { 592 #ifdef CC_INTERP 593 // Needed for JVMTI.
// NOTE(review): old/left diff column, rendered lines 593-629: the body of
// interpreter_frame_result (reports a method's return value for JVMTI).
// For native methods the return value was pushed at sp() before the method_exit
// runtime call; jfloat/jdouble results sit 2 * Interpreter::stackElementWords
// higher because a push(ltos) follows the FP-register push (see
// generate_native_result). Non-native methods read the value at the interpreter
// tos address. The switch then copies by BasicType; for T_OBJECT/T_ARRAY the
// native-method result comes from istate->_oop_temp (CC_INTERP) or the
// interpreter_frame_oop_temp_offset slot (template interpreter). Segment is cut
// mid-ternary at the NULL-check of the tos oop slot.
The result should always be in the 594 // interpreterState object 595 interpreterState istate = get_interpreterState(); 596 #endif // CC_INTERP 597 assert(is_interpreted_frame(), "interpreted frame expected"); 598 Method* method = interpreter_frame_method(); 599 BasicType type = method->result_type(); 600 601 intptr_t* tos_addr; 602 if (method->is_native()) { 603 // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0 604 // Prior to calling into the runtime to report the method_exit the possible 605 // return value is pushed to the native stack. If the result is a jfloat/jdouble 606 // then ST0 is saved before EAX/EDX. See the note in generate_native_result 607 tos_addr = (intptr_t*)sp(); 608 if (type == T_FLOAT || type == T_DOUBLE) { 609 // This is times two because we do a push(ltos) after pushing XMM0 610 // and that takes two interpreter stack slots. 611 tos_addr += 2 * Interpreter::stackElementWords; 612 } 613 } else { 614 tos_addr = (intptr_t*)interpreter_frame_tos_address(); 615 } 616 617 switch (type) { 618 case T_OBJECT : 619 case T_ARRAY : { 620 oop obj; 621 if (method->is_native()) { 622 #ifdef CC_INTERP 623 obj = istate->_oop_temp; 624 #else 625 obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); 626 #endif // CC_INTERP 627 } else { 628 oop* obj_p = (oop*)tos_addr; 629 obj = (obj_p == NULL) ?
// NOTE(review): this segment spans the renderer's column boundary. Before the
// "|" separator: the tail of the OLD column's BasicType switch -- the oop result
// is heap-range-asserted and stored through oop_result, and each primitive case
// copies *tos_addr into the matching jvalue field (T_VOID does nothing).
// After the "|": the start of the NEW/right column (rendered 296-334), i.e. the
// same file after the CC_INTERP removal: is_interpreted_frame, frame_size and
// entry_frame_argument_at are unchanged, while interpreter_frame_sender_sp,
// set_interpreter_frame_sender_sp and interpreter_frame_monitor_begin now exist
// only in their template-interpreter (frame-slot-offset) form, unconditionally.
// Cut at the opening of the new column's interpreter_frame_monitor_end.
(oop)NULL : *obj_p; 630 } 631 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check"); 632 *oop_result = obj; 633 break; 634 } 635 case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break; 636 case T_BYTE : value_result->b = *(jbyte*)tos_addr; break; 637 case T_CHAR : value_result->c = *(jchar*)tos_addr; break; 638 case T_SHORT : value_result->s = *(jshort*)tos_addr; break; 639 case T_INT : value_result->i = *(jint*)tos_addr; break; 640 case T_LONG : value_result->j = *(jlong*)tos_addr; break; 641 case T_FLOAT : { 642 value_result->f = *(jfloat*)tos_addr; 643 break; 644 } 645 case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break; 646 case T_VOID : /* Nothing to do */ break; | 296 } 297 } 298 299 bool frame::is_interpreted_frame() const { 300 return Interpreter::contains(pc()); 301 } 302 303 int frame::frame_size(RegisterMap* map) const { 304 frame sender = this->sender(map); 305 return sender.sp() - sp(); 306 } 307 308 intptr_t* frame::entry_frame_argument_at(int offset) const { 309 // convert offset to index to deal with tsi 310 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); 311 // Entry frame's arguments are always in relation to unextended_sp() 312 return &unextended_sp()[index]; 313 } 314 315 // sender_sp 316 intptr_t* frame::interpreter_frame_sender_sp() const { 317 assert(is_interpreted_frame(), "interpreted frame expected"); 318 return (intptr_t*) at(interpreter_frame_sender_sp_offset); 319 } 320 321 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) { 322 assert(is_interpreted_frame(), "interpreted frame expected"); 323 ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp); 324 } 325 326 327 // monitor elements 328 329 BasicObjectLock* frame::interpreter_frame_monitor_begin() const { 330 return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset); 331 } 332 333 BasicObjectLock* frame::interpreter_frame_monitor_end() const { 334 BasicObjectLock* result =
// NOTE(review): new/right diff column, rendered lines 334-368 then an elided
// jump to 488-497. Identical logic to the old column's L2 segment, with the
// CC_INTERP conditionals gone:
//  - interpreter_frame_monitor_end completes, asserting the monitor-top pointer
//    lies strictly between sp() and fp();
//  - interpreter_frame_set_monitor_end / interpreter_frame_set_last_sp
//    (template-interpreter deoptimization) are now unconditional;
//  - sender_for_entry_frame rebuilds the caller frame from the JavaFrameAnchor.
// The 368->488 jump again elides the verify_deopt_original_pc body and the head
// of the sender-dispatch function; the fragment from 488 onward is that
// function's interior (signature not visible in this view).
(BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset); 335 // make sure the pointer points inside the frame 336 assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer"); 337 assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer"); 338 return result; 339 } 340 341 void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) { 342 *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value; 343 } 344 345 // Used by template based interpreter deoptimization 346 void frame::interpreter_frame_set_last_sp(intptr_t* sp) { 347 *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp; 348 } 349 350 frame frame::sender_for_entry_frame(RegisterMap* map) const { 351 assert(map != NULL, "map must be set"); 352 // Java frame called from C; skip all C frames and return top C 353 // frame of that chunk as the sender 354 JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); 355 assert(!entry_frame_is_first(), "next Java fp must be non zero"); 356 assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); 357 map->clear(); 358 assert(map->include_argument_oops(), "should be set by clear"); 359 if (jfa->last_Java_pc() != NULL ) { 360 frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); 361 return fr; 362 } 363 frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); 364 return fr; 365 } 366 367 //------------------------------------------------------------------------------ 368 // frame::verify_deopt_original_pc 488 map->set_include_argument_oops(false); 489 490 if (is_entry_frame()) 491 return sender_for_entry_frame(map); 492 if (is_interpreted_frame()) 493 return sender_for_interpreter_frame(map); 494 assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); 495 496 // This test looks odd: why is it not is_compiled_frame() ? That's 497 // because stubs also have OOP maps.
// NOTE(review): new/right diff column, rendered lines 498-571 with an internal
// elision 527->541. Mirrors the old column's L3 segment minus the CC_INTERP
// scaffolding (the #ifdef/#else/#endif and the "QQQ" marker are gone; the
// validation logic itself is unchanged):
//  - sender dispatch tail: non-NULL _cb -> sender_for_compiled_frame, else a
//    native-marshaling frame from sender_sp()/link()/sender_pc();
//  - is_interpreted_frame_valid: alignment/layout sanity checks, then (after the
//    elided method-validation span that declares the Method* local `m`) bcp,
//    ConstantPoolCache* and locals validation, ending in an unconditional
//    `return true;`;
//  - head of interpreter_frame_result, now without the interpreterState
//    preamble. Cut mid-comment ("i.e." continues on the next rendered line).
498 if (_cb != NULL) { 499 return sender_for_compiled_frame(map); 500 } 501 502 // Must be native-compiled frame, i.e. the marshaling code for native 503 // methods that exists in the core system. 504 return frame(sender_sp(), link(), sender_pc()); 505 } 506 507 bool frame::is_interpreted_frame_valid(JavaThread* thread) const { 508 assert(is_interpreted_frame(), "Not an interpreted frame"); 509 // These are reasonable sanity checks 510 if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) { 511 return false; 512 } 513 if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) { 514 return false; 515 } 516 if (fp() + interpreter_frame_initial_sp_offset < sp()) { 517 return false; 518 } 519 // These are hacks to keep us out of trouble. 520 // The problem with these is that they mask other problems 521 if (fp() <= sp()) { // this attempts to deal with unsigned comparison above 522 return false; 523 } 524 525 // do some validation of frame elements 526 527 // first the method 541 } 542 543 // validate bci/bcx 544 545 address bcp = interpreter_frame_bcp(); 546 if (m->validate_bci_from_bcp(bcp) < 0) { 547 return false; 548 } 549 550 // validate constantPoolCache* 551 ConstantPoolCache* cp = *interpreter_frame_cache_addr(); 552 if (cp == NULL || !cp->is_metaspace_object()) return false; 553 554 // validate locals 555 556 address locals = (address) *interpreter_frame_locals_addr(); 557 558 if (locals > thread->stack_base() || locals < (address) fp()) return false; 559 560 // We'd have to be pretty unlucky to be mislead at this point 561 return true; 562 } 563 564 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) { 565 assert(is_interpreted_frame(), "interpreted frame expected"); 566 Method* method = interpreter_frame_method(); 567 BasicType type = method->result_type(); 568 569 intptr_t* tos_addr; 570 if (method->is_native()) { 571 // TODO : ensure AARCH64 does the same as Intel here i.e.
// NOTE(review): new/right diff column, rendered lines 572-610: remainder of
// interpreter_frame_result, matching the old column's L4/L5 segments except that
// the native T_OBJECT/T_ARRAY case is reduced to the single
// interpreter_frame_oop_temp_offset read (the istate->_oop_temp branch was
// CC_INTERP-only). Same native tos_addr adjustment (float/double results sit
// 2 * Interpreter::stackElementWords above sp()), same heap-range assert on the
// oop result, same per-BasicType copy into value_result. The trailing "|" is
// the renderer's column separator, not code; the switch (and the enclosing
// function) continues past this view, so the definition is incomplete here.
push v0 then r0 572 // Prior to calling into the runtime to report the method_exit the possible 573 // return value is pushed to the native stack. If the result is a jfloat/jdouble 574 // then ST0 is saved before EAX/EDX. See the note in generate_native_result 575 tos_addr = (intptr_t*)sp(); 576 if (type == T_FLOAT || type == T_DOUBLE) { 577 // This is times two because we do a push(ltos) after pushing XMM0 578 // and that takes two interpreter stack slots. 579 tos_addr += 2 * Interpreter::stackElementWords; 580 } 581 } else { 582 tos_addr = (intptr_t*)interpreter_frame_tos_address(); 583 } 584 585 switch (type) { 586 case T_OBJECT : 587 case T_ARRAY : { 588 oop obj; 589 if (method->is_native()) { 590 obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); 591 } else { 592 oop* obj_p = (oop*)tos_addr; 593 obj = (obj_p == NULL) ? (oop)NULL : *obj_p; 594 } 595 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check"); 596 *oop_result = obj; 597 break; 598 } 599 case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break; 600 case T_BYTE : value_result->b = *(jbyte*)tos_addr; break; 601 case T_CHAR : value_result->c = *(jchar*)tos_addr; break; 602 case T_SHORT : value_result->s = *(jshort*)tos_addr; break; 603 case T_INT : value_result->i = *(jint*)tos_addr; break; 604 case T_LONG : value_result->j = *(jlong*)tos_addr; break; 605 case T_FLOAT : { 606 value_result->f = *(jfloat*)tos_addr; 607 break; 608 } 609 case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break; 610 case T_VOID : /* Nothing to do */ break; |