< prev index next >

src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

Print this page




 410   return map;
 411 }
 412 
 413 #define __ this->
 414 
 // Spills all live registers to the stack WITHOUT building an OopMap (the
 // "_no_oop_map" variant): pusha() saves the integer registers, then
 // extra_space_offset stack slots are reserved for the FPU/XMM save area.
 // Layout of that area (fpu_state_off, float_regs_as_doubles_off,
 // xmm_regs_as_doubles_off, marker, extra_space_offset) is declared
 // elsewhere in this file. In debug builds a 0xfeedbeef canary is written
 // into the marker slot; restore_fpu() later checks it to catch layout
 // mismatches between save and restore.
 //
 // save_fpu_registers == false skips the FP state entirely (integer
 // registers and the stack-area reservation still happen).
 415 void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
 416   __ block_comment("save_live_registers");
 417 
 418   __ pusha();         // integer registers
 419 
 420   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
 421   // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
 422 
 423   __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
 424 
 425 #ifdef ASSERT
       // Frame-layout canary, verified by restore_fpu() before the area is popped.
 426   __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
 427 #endif
 428 
 429   if (save_fpu_registers) {
 
       // No SSE2: save the complete x87 state (fnsave), then also store the
       // stack registers individually in deoptimization-friendly double form.
 430     if (UseSSE < 2) {
 431       // save FPU stack
 432       __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 433       __ fwait();
 434 
 435 #ifdef ASSERT
 436       Label ok;
 437       __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
 438       __ jccb(Assembler::equal, ok);
 439       __ stop("corrupted control word detected");
 440       __ bind(ok);
 441 #endif
 442 
 443       // Reset the control word to guard against exceptions being unmasked
 444       // since fstp_d can cause FPU stack underflow exceptions.  Write it
 445       // into the on stack copy and then reload that to make sure that the
 446       // current and future values are correct.
 447       __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std())
;
 448       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 449 
 450       // Save the FPU registers in de-opt-able form
 451       int offset = 0;
 452       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 453         __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 454         offset += 8;
 455       }
 456     }
 
 457 
 458     if (UseSSE >= 2) {
 459       // save XMM registers
 460       // XMM registers can contain float or double values, but this is not known here,
 461       // so always save them as doubles.
 462       // note that float values are _not_ converted automatically, so for float values
 463       // the second word contains only garbage data.
 464       int xmm_bypass_limit = FrameMap::nof_xmm_regs;
 465       int offset = 0;
 466 #ifdef _LP64
       // Below AVX-512 (UseAVX < 3) only the lower half of the 64-bit XMM
       // register file is addressable, so only save that half.
 467       if (UseAVX < 3) {
 468         xmm_bypass_limit = xmm_bypass_limit / 2;
 469       }
 470 #endif
 471       for (int n = 0; n < xmm_bypass_limit; n++) {
 472         XMMRegister xmm_name = as_XMMRegister(n);
 473         __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
 474         offset += 8;
 475       }
 
 476     } else if (UseSSE == 1) {
 477       // save XMM registers as float because double not supported without SSE2(num MMX == num fpu)
 478       int offset = 0;
 479       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 480         XMMRegister xmm_name = as_XMMRegister(n);
 481         __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
 482         offset += 8;
 483       }
 
 484     }
 485   }
 486 
 487   // FPU stack must be empty now
 488   __ verify_FPU(0, "save_live_registers");
 489 }
 490 
 491 #undef __
 492 #define __ sasm->
 493 
 // Counterpart of save_live_registers_no_oop_map() for the FP state only:
 // reloads the XMM registers (UseSSE >= 2 as doubles, UseSSE == 1 as
 // floats) or the complete x87 state (frstor, UseSSE < 2) from the save
 // area, then pops the extra_space_offset slots. The integer registers
 // saved by pusha() are NOT restored here — presumably the caller
 // (restore_live_registers) handles those; verify against that code.
 // In debug builds the 0xfeedbeef marker slot written at save time is
 // checked first, so a save/restore layout mismatch stops the VM.
 494 static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
 
 495   if (restore_fpu_registers) {
 496     if (UseSSE >= 2) {
 497       // restore XMM registers
 498       int xmm_bypass_limit = FrameMap::nof_xmm_regs;
 499 #ifdef _LP64
       // Below AVX-512 (UseAVX < 3) only half the XMM file was saved; must
       // mirror the limit used in save_live_registers_no_oop_map().
 500       if (UseAVX < 3) {
 501         xmm_bypass_limit = xmm_bypass_limit / 2;
 502       }
 503 #endif
 
 
 
 
 
 
 
 
 
 
 
 504       int offset = 0;
 505       for (int n = 0; n < xmm_bypass_limit; n++) {
 506         XMMRegister xmm_name = as_XMMRegister(n);
 507         __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 508         offset += 8;
 509       }
 510     } else if (UseSSE == 1) {
 511       // restore XMM registers(num MMX == num fpu)
 512       int offset = 0;
 513       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 514         XMMRegister xmm_name = as_XMMRegister(n);
 515         __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 516         offset += 8;
 517       }
 518     }
 519 
 520     if (UseSSE < 2) {
       // No SSE2: reload the complete x87 state saved by fnsave.
 521       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 522     } else {
 523       // check that FPU stack is really empty
 524       __ verify_FPU(0, "restore_live_registers");
 525     }
 526 
 527   } else {
 528     // check that FPU stack is really empty
 529     __ verify_FPU(0, "restore_live_registers");
 530   }
 
 531 
 532 #ifdef ASSERT
 533   {
 534     Label ok;
       // Canary written by save_live_registers_no_oop_map(); mismatch means
       // the save and restore frame layouts have diverged.
 535     __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
 536     __ jcc(Assembler::equal, ok);
 537     __ stop("bad offsets in frame");
 538     __ bind(ok);
 539   }
 540 #endif // ASSERT
 541 
 542   __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
 543 }
 544 
 545 #undef __
 546 #define __ this->
 547 
 548 void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
 549   __ block_comment("restore_live_registers");
 550 


 682     __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
 683     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
 684     break;
 685   case handle_exception_nofpu_id:
 686   case handle_exception_id:
 687     // At this point all registers MAY be live.
 688     oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
 689     break;
 690   case handle_exception_from_callee_id: {
 691     // At this point all registers except exception oop (RAX) and
 692     // exception pc (RDX) are dead.
 693     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
 694     oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
 695     sasm->set_frame_size(frame_size);
 696     WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
 697     break;
 698   }
 699   default:  ShouldNotReachHere();
 700   }
 701 
 702 #ifdef TIERED
 703   // C2 can leave the fpu stack dirty
 704   if (UseSSE < 2) {

 705     __ empty_FPU_stack();
 706   }
 707 #endif // TIERED
 708 
 709   // verify that only rax, and rdx is valid at this time
 710   __ invalidate_registers(false, true, true, false, true, true);
 711   // verify that rax, contains a valid exception
 712   __ verify_not_null_oop(exception_oop);
 713 
 714   // load address of JavaThread object for thread-local data
 715   NOT_LP64(__ get_thread(thread);)
 716 
 717 #ifdef ASSERT
 718   // check that fields in JavaThread for exception oop and issuing pc are
 719   // empty before writing to them
 720   Label oop_empty;
 721   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
 722   __ jcc(Assembler::equal, oop_empty);
 723   __ stop("exception oop already set");
 724   __ bind(oop_empty);
 725 
 726   Label pc_empty;
 727   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);


 789   // verify that only rax, is valid at this time
 790   __ invalidate_registers(false, true, true, true, true, true);
 791 
 792 #ifdef ASSERT
 793   // check that fields in JavaThread for exception oop and issuing pc are empty
 794   NOT_LP64(__ get_thread(thread);)
 795   Label oop_empty;
 796   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
 797   __ jcc(Assembler::equal, oop_empty);
 798   __ stop("exception oop must be empty");
 799   __ bind(oop_empty);
 800 
 801   Label pc_empty;
 802   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
 803   __ jcc(Assembler::equal, pc_empty);
 804   __ stop("exception pc must be empty");
 805   __ bind(pc_empty);
 806 #endif
 807 
 808   // clear the FPU stack in case any FPU results are left behind
 809   __ empty_FPU_stack();
 810 
 811   // save exception_oop in callee-saved register to preserve it during runtime calls
 812   __ verify_not_null_oop(exception_oop);
 813   __ movptr(exception_oop_callee_saved, exception_oop);
 814 
 815   NOT_LP64(__ get_thread(thread);)
 816   // Get return address (is on top of stack after leave).
 817   __ movptr(exception_pc, Address(rsp, 0));
 818 
 819   // search the exception handler address of the caller (using the return address)
 820   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
 821   // rax: exception handler address of the caller
 822 
 823   // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
 824   __ invalidate_registers(false, true, true, true, false, true);
 825 
 826   // move result of call into correct register
 827   __ movptr(handler_addr, rax);
 828 
 829   // Restore exception oop to RAX (required convention of exception handler).


1460       }
1461       break;
1462 
1463     case dtrace_object_alloc_id:
1464       { // rax,: object
1465         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1466         // we can't gc here so skip the oopmap but make sure that all
1467         // the live registers get saved.
1468         save_live_registers(sasm, 1);
1469 
1470         __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1471         __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1472         NOT_LP64(__ pop(rax));
1473 
1474         restore_live_registers(sasm);
1475       }
1476       break;
1477 
1478     case fpu2long_stub_id:
1479       {













1480         // rax, and rdx are destroyed, but should be free since the result is returned there
1481         // preserve rsi,ecx
1482         __ push(rsi);
1483         __ push(rcx);
1484         LP64_ONLY(__ push(rdx);)
1485 
1486         // check for NaN
1487         Label return0, do_return, return_min_jlong, do_convert;
1488 
1489         Address value_high_word(rsp, wordSize + 4);
1490         Address value_low_word(rsp, wordSize);
1491         Address result_high_word(rsp, 3*wordSize + 4);
1492         Address result_low_word(rsp, 3*wordSize);
1493 
1494         __ subptr(rsp, 32);                    // more than enough on 32bit
1495         __ fst_d(value_low_word);
1496         __ movl(rax, value_high_word);
1497         __ andl(rax, 0x7ff00000);
1498         __ cmpl(rax, 0x7ff00000);
1499         __ jcc(Assembler::notEqual, do_convert);
1500         __ movl(rax, value_high_word);
1501         __ andl(rax, 0xfffff);
1502         __ orl(rax, value_low_word);
1503         __ jcc(Assembler::notZero, return0);
1504 


1509         __ movw(Address(rsp, 2), rax);
1510         __ fldcw(Address(rsp, 2));
1511         __ fwait();
1512         __ fistp_d(result_low_word);
1513         __ fldcw(Address(rsp, 0));
1514         __ fwait();
1515         // This gets the entire long in rax on 64bit
1516         __ movptr(rax, result_low_word);
1517         // testing of high bits
1518         __ movl(rdx, result_high_word);
1519         __ mov(rcx, rax);
1520         // What the heck is the point of the next instruction???
1521         __ xorl(rcx, 0x0);
1522         __ movl(rsi, 0x80000000);
1523         __ xorl(rsi, rdx);
1524         __ orl(rcx, rsi);
1525         __ jcc(Assembler::notEqual, do_return);
1526         __ fldz();
1527         __ fcomp_d(value_low_word);
1528         __ fnstsw_ax();
1529 #ifdef _LP64
1530         __ testl(rax, 0x4100);  // ZF & CF == 0
1531         __ jcc(Assembler::equal, return_min_jlong);
1532 #else
1533         __ sahf();
1534         __ jcc(Assembler::above, return_min_jlong);
1535 #endif // _LP64
1536         // return max_jlong
1537 #ifndef _LP64
1538         __ movl(rdx, 0x7fffffff);
1539         __ movl(rax, 0xffffffff);
1540 #else
1541         __ mov64(rax, CONST64(0x7fffffffffffffff));
1542 #endif // _LP64
1543         __ jmp(do_return);
1544 
1545         __ bind(return_min_jlong);
1546 #ifndef _LP64
1547         __ movl(rdx, 0x80000000);
1548         __ xorl(rax, rax);
1549 #else
1550         __ mov64(rax, UCONST64(0x8000000000000000));
1551 #endif // _LP64
1552         __ jmp(do_return);
1553 
1554         __ bind(return0);
1555         __ fpop();
1556 #ifndef _LP64
1557         __ xorptr(rdx,rdx);
1558         __ xorptr(rax,rax);
1559 #else
1560         __ xorptr(rax, rax);
1561 #endif // _LP64
1562 
1563         __ bind(do_return);
1564         __ addptr(rsp, 32);
1565         LP64_ONLY(__ pop(rdx);)
1566         __ pop(rcx);
1567         __ pop(rsi);
1568         __ ret(0);

1569       }
1570       break;
1571 
1572     case predicate_failed_trap_id:
1573       {
1574         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1575 
1576         OopMap* map = save_live_registers(sasm, 1);
1577 
1578         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1579         oop_maps = new OopMapSet();
1580         oop_maps->add_gc_map(call_offset, map);
1581         restore_live_registers(sasm);
1582         __ leave();
1583         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1584         assert(deopt_blob != NULL, "deoptimization blob must have been created");
1585 
1586         __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1587       }
1588       break;


 410   return map;
 411 }
 412 
 413 #define __ this->
 414 
 // Spills all live registers to the stack WITHOUT building an OopMap (the
 // "_no_oop_map" variant): pusha() saves the integer registers, then
 // extra_space_offset stack slots are reserved for the FPU/XMM save area.
 // On 64-bit (_LP64) SSE2 is always available, so the x87 (UseSSE < 2) and
 // UseSSE == 1 paths are compiled out and only XMM registers are saved;
 // the x87 paths remain for 32-bit. In debug builds a 0xfeedbeef canary is
 // written into the marker slot; restore_fpu() later checks it to catch
 // layout mismatches between save and restore.
 //
 // save_fpu_registers == false skips the FP state entirely (integer
 // registers and the stack-area reservation still happen).
 415 void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
 416   __ block_comment("save_live_registers");
 417 
 418   __ pusha();         // integer registers
 419 
 420   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
 421   // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
 422 
 423   __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
 424 
 425 #ifdef ASSERT
       // Frame-layout canary, verified by restore_fpu() before the area is popped.
 426   __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
 427 #endif
 428 
 429   if (save_fpu_registers) {
 430 #ifndef _LP64
       // 32-bit without SSE2: save the complete x87 state (fnsave), then also
       // store the stack registers individually in deopt-friendly double form.
 431     if (UseSSE < 2) {
 432       // save FPU stack
 433       __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 434       __ fwait();
 435 
 436 #ifdef ASSERT
 437       Label ok;
 438       __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
 439       __ jccb(Assembler::equal, ok);
 440       __ stop("corrupted control word detected");
 441       __ bind(ok);
 442 #endif
 443 
 444       // Reset the control word to guard against exceptions being unmasked
 445       // since fstp_d can cause FPU stack underflow exceptions.  Write it
 446       // into the on stack copy and then reload that to make sure that the
 447       // current and future values are correct.
 448       __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
 449       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 450 
 451       // Save the FPU registers in de-opt-able form
 452       int offset = 0;
 453       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 454         __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 455         offset += 8;
 456       }
 457     }
 458 #endif // !_LP64
 459 
 460     if (UseSSE >= 2) {
 461       // save XMM registers
 462       // XMM registers can contain float or double values, but this is not known here,
 463       // so always save them as doubles.
 464       // note that float values are _not_ converted automatically, so for float values
 465       // the second word contains only garbage data.
 466       int xmm_bypass_limit = FrameMap::nof_xmm_regs;
 467       int offset = 0;
 468 #ifdef _LP64
       // Below AVX-512 (UseAVX < 3) only the lower half of the 64-bit XMM
       // register file is addressable, so only save that half.
 469       if (UseAVX < 3) {
 470         xmm_bypass_limit = xmm_bypass_limit / 2;
 471       }
 472 #endif
 473       for (int n = 0; n < xmm_bypass_limit; n++) {
 474         XMMRegister xmm_name = as_XMMRegister(n);
 475         __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
 476         offset += 8;
 477       }
 478 #ifndef _LP64
 479     } else if (UseSSE == 1) {
 480       // save XMM registers as float because double not supported without SSE2(num MMX == num fpu)
 481       int offset = 0;
 482       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 483         XMMRegister xmm_name = as_XMMRegister(n);
 484         __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
 485         offset += 8;
 486       }
 487 #endif // !_LP64
 488     }
 489   }
 490 
 491   // FPU stack must be empty now
 492   NOT_LP64( __ verify_FPU(0, "save_live_registers"); )
 493 }
 494 
 495 #undef __
 496 #define __ sasm->
 497 
 // Counterpart of save_live_registers_no_oop_map() for the FP state only:
 // on 64-bit (_LP64) just the XMM registers are reloaded; on 32-bit the
 // XMM (UseSSE >= 2 as doubles, UseSSE == 1 as floats) or the complete x87
 // state (frstor, UseSSE < 2) is reloaded, mirroring the save paths. The
 // save area (extra_space_offset slots) is then popped. The integer
 // registers saved by pusha() are NOT restored here — presumably the
 // caller (restore_live_registers) handles those; verify against that
 // code. In debug builds the 0xfeedbeef marker slot written at save time
 // is checked first, so a save/restore layout mismatch stops the VM.
 498 static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
 499 #ifdef _LP64
 500   if (restore_fpu_registers) {
 
 501     // restore XMM registers
 502     int xmm_bypass_limit = FrameMap::nof_xmm_regs;
 
       // Below AVX-512 (UseAVX < 3) only half the XMM file was saved; must
       // mirror the limit used in save_live_registers_no_oop_map().
 503     if (UseAVX < 3) {
 504       xmm_bypass_limit = xmm_bypass_limit / 2;
 505     }
 506     int offset = 0;
 507     for (int n = 0; n < xmm_bypass_limit; n++) {
 508       XMMRegister xmm_name = as_XMMRegister(n);
 509       __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 510       offset += 8;
 511     }
 512   }
 513 #else
 514   if (restore_fpu_registers) {
 515     if (UseSSE >= 2) {
 516       // restore XMM registers
 517       int xmm_bypass_limit = FrameMap::nof_xmm_regs;
 518       int offset = 0;
 519       for (int n = 0; n < xmm_bypass_limit; n++) {
 520         XMMRegister xmm_name = as_XMMRegister(n);
 521         __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 522         offset += 8;
 523       }
 524     } else if (UseSSE == 1) {
 525       // restore XMM registers(num MMX == num fpu)
 526       int offset = 0;
 527       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
 528         XMMRegister xmm_name = as_XMMRegister(n);
 529         __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
 530         offset += 8;
 531       }
 532     }
 533 
 534     if (UseSSE < 2) {
       // No SSE2: reload the complete x87 state saved by fnsave.
 535       __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
 536     } else {
 537       // check that FPU stack is really empty
 538       __ verify_FPU(0, "restore_live_registers");
 539     }
 
 540   } else {
 541     // check that FPU stack is really empty
 542     __ verify_FPU(0, "restore_live_registers");
 543   }
 544 #endif // _LP64
 545 
 546 #ifdef ASSERT
 547   {
 548     Label ok;
       // Canary written by save_live_registers_no_oop_map(); mismatch means
       // the save and restore frame layouts have diverged.
 549     __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
 550     __ jcc(Assembler::equal, ok);
 551     __ stop("bad offsets in frame");
 552     __ bind(ok);
 553   }
 554 #endif // ASSERT
 555 
 556   __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
 557 }
 558 
 559 #undef __
 560 #define __ this->
 561 
 562 void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
 563   __ block_comment("restore_live_registers");
 564 


 696     __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
 697     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
 698     break;
 699   case handle_exception_nofpu_id:
 700   case handle_exception_id:
 701     // At this point all registers MAY be live.
 702     oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
 703     break;
 704   case handle_exception_from_callee_id: {
 705     // At this point all registers except exception oop (RAX) and
 706     // exception pc (RDX) are dead.
 707     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
 708     oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
 709     sasm->set_frame_size(frame_size);
 710     WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
 711     break;
 712   }
 713   default:  ShouldNotReachHere();
 714   }
 715 
 716 #if !defined(_LP64) && defined(TIERED)

 717   if (UseSSE < 2) {
 718     // C2 can leave the fpu stack dirty
 719     __ empty_FPU_stack();
 720   }
 721 #endif // !_LP64 && TIERED
 722 
 723   // verify that only rax, and rdx is valid at this time
 724   __ invalidate_registers(false, true, true, false, true, true);
 725   // verify that rax, contains a valid exception
 726   __ verify_not_null_oop(exception_oop);
 727 
 728   // load address of JavaThread object for thread-local data
 729   NOT_LP64(__ get_thread(thread);)
 730 
 731 #ifdef ASSERT
 732   // check that fields in JavaThread for exception oop and issuing pc are
 733   // empty before writing to them
 734   Label oop_empty;
 735   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
 736   __ jcc(Assembler::equal, oop_empty);
 737   __ stop("exception oop already set");
 738   __ bind(oop_empty);
 739 
 740   Label pc_empty;
 741   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);


 803   // verify that only rax, is valid at this time
 804   __ invalidate_registers(false, true, true, true, true, true);
 805 
 806 #ifdef ASSERT
 807   // check that fields in JavaThread for exception oop and issuing pc are empty
 808   NOT_LP64(__ get_thread(thread);)
 809   Label oop_empty;
 810   __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
 811   __ jcc(Assembler::equal, oop_empty);
 812   __ stop("exception oop must be empty");
 813   __ bind(oop_empty);
 814 
 815   Label pc_empty;
 816   __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
 817   __ jcc(Assembler::equal, pc_empty);
 818   __ stop("exception pc must be empty");
 819   __ bind(pc_empty);
 820 #endif
 821 
 822   // clear the FPU stack in case any FPU results are left behind
 823   NOT_LP64( __ empty_FPU_stack(); )
 824 
 825   // save exception_oop in callee-saved register to preserve it during runtime calls
 826   __ verify_not_null_oop(exception_oop);
 827   __ movptr(exception_oop_callee_saved, exception_oop);
 828 
 829   NOT_LP64(__ get_thread(thread);)
 830   // Get return address (is on top of stack after leave).
 831   __ movptr(exception_pc, Address(rsp, 0));
 832 
 833   // search the exception handler address of the caller (using the return address)
 834   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
 835   // rax: exception handler address of the caller
 836 
 837   // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
 838   __ invalidate_registers(false, true, true, true, false, true);
 839 
 840   // move result of call into correct register
 841   __ movptr(handler_addr, rax);
 842 
 843   // Restore exception oop to RAX (required convention of exception handler).


1474       }
1475       break;
1476 
1477     case dtrace_object_alloc_id:
1478       { // rax,: object
1479         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1480         // we can't gc here so skip the oopmap but make sure that all
1481         // the live registers get saved.
1482         save_live_registers(sasm, 1);
1483 
1484         __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1485         __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1486         NOT_LP64(__ pop(rax));
1487 
1488         restore_live_registers(sasm);
1489       }
1490       break;
1491 
1492     case fpu2long_stub_id:
1493       {
1494 #ifdef _LP64
1495         Label done;
1496         __ cvttsd2siq(rax, Address(rsp, wordSize));
1497         __ cmp64(rax, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
1498         __ jccb(Assembler::notEqual, done);
1499         __ movq(rax, Address(rsp, wordSize));
1500         __ subptr(rsp, 8);
1501         __ movq(Address(rsp, 0), rax);
1502         __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
1503         __ pop(rax);
1504         __ bind(done);
1505         __ ret(0);
1506 #else
1507         // rax, and rdx are destroyed, but should be free since the result is returned there
1508         // preserve rsi,ecx
1509         __ push(rsi);
1510         __ push(rcx);

1511 
1512         // check for NaN
1513         Label return0, do_return, return_min_jlong, do_convert;
1514 
1515         Address value_high_word(rsp, wordSize + 4);
1516         Address value_low_word(rsp, wordSize);
1517         Address result_high_word(rsp, 3*wordSize + 4);
1518         Address result_low_word(rsp, 3*wordSize);
1519 
1520         __ subptr(rsp, 32);                    // more than enough on 32bit
1521         __ fst_d(value_low_word);
1522         __ movl(rax, value_high_word);
1523         __ andl(rax, 0x7ff00000);
1524         __ cmpl(rax, 0x7ff00000);
1525         __ jcc(Assembler::notEqual, do_convert);
1526         __ movl(rax, value_high_word);
1527         __ andl(rax, 0xfffff);
1528         __ orl(rax, value_low_word);
1529         __ jcc(Assembler::notZero, return0);
1530 


1535         __ movw(Address(rsp, 2), rax);
1536         __ fldcw(Address(rsp, 2));
1537         __ fwait();
1538         __ fistp_d(result_low_word);
1539         __ fldcw(Address(rsp, 0));
1540         __ fwait();
1541         // This gets the entire long in rax on 64bit
1542         __ movptr(rax, result_low_word);
1543         // testing of high bits
1544         __ movl(rdx, result_high_word);
1545         __ mov(rcx, rax);
1546         // What the heck is the point of the next instruction???
1547         __ xorl(rcx, 0x0);
1548         __ movl(rsi, 0x80000000);
1549         __ xorl(rsi, rdx);
1550         __ orl(rcx, rsi);
1551         __ jcc(Assembler::notEqual, do_return);
1552         __ fldz();
1553         __ fcomp_d(value_low_word);
1554         __ fnstsw_ax();




1555         __ sahf();
1556         __ jcc(Assembler::above, return_min_jlong);

1557         // return max_jlong

1558         __ movl(rdx, 0x7fffffff);
1559         __ movl(rax, 0xffffffff);



1560         __ jmp(do_return);
1561 
1562         __ bind(return_min_jlong);

1563         __ movl(rdx, 0x80000000);
1564         __ xorl(rax, rax);



1565         __ jmp(do_return);
1566 
1567         __ bind(return0);
1568         __ fpop();

1569         __ xorptr(rdx,rdx);
1570         __ xorptr(rax,rax);



1571 
1572         __ bind(do_return);
1573         __ addptr(rsp, 32);

1574         __ pop(rcx);
1575         __ pop(rsi);
1576         __ ret(0);
1577 #endif // _LP64
1578       }
1579       break;
1580 
1581     case predicate_failed_trap_id:
1582       {
1583         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1584 
1585         OopMap* map = save_live_registers(sasm, 1);
1586 
1587         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1588         oop_maps = new OopMapSet();
1589         oop_maps->add_gc_map(call_offset, map);
1590         restore_live_registers(sasm);
1591         __ leave();
1592         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1593         assert(deopt_blob != NULL, "deoptimization blob must have been created");
1594 
1595         __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1596       }
1597       break;
< prev index next >