src/cpu/sparc/vm/stubGenerator_sparc.cpp

 549     __ ret();
 550     __ delayed()->restore();
 551 
 552     return start;
 553   }
 554 
 555 
 556   address generate_stop_subroutine() {
 557     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 558     address start = __ pc();
 559 
 560     __ stop_subroutine();
 561 
 562     return start;
 563   }
 564 
 565   address generate_flush_callers_register_windows() {
 566     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 567     address start = __ pc();
 568 
 569     __ flush_windows();
 570     __ retl(false);
 571     __ delayed()->add( FP, STACK_BIAS, O0 );
 572     // The returned value must be a stack pointer whose register save area
 573     // is flushed, and will stay flushed while the caller executes.
 574 
 575     return start;
 576   }
 577 
 578   // Helper functions for v8 atomic operations.
 579   //
 580   void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
 581     if (mark_oop_reg == noreg) {
 582       address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
 583       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 584     } else {
 585       assert(scratch_reg != noreg, "just checking");
 586       address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
 587       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 588       __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
 589       __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
 590     }
 591   }
 592 
 593   void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 594 
 595     get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
 596     __ set(StubRoutines::Sparc::locked, lock_reg);
 597     // Initialize yield counter
 598     __ mov(G0,yield_reg);
 599 
 600     __ BIND(retry);
 601     __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
 602 
 603     // This code can only be called from inside the VM; this
 604     // stub is only invoked from Atomic::add().  We do not
 605     // want to use call_VM, because _last_java_sp and such
 606     // must already be set.
 607     //
 608     // Save the regs and make space for a C call
 609     __ save(SP, -96, SP);
 610     __ save_all_globals_into_locals();
 611     BLOCK_COMMENT("call os::naked_sleep");
 612     __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
 613     __ delayed()->nop();
 614     __ restore_globals_from_locals();
 615     __ restore();
 616     // reset the counter
 617     __ mov(G0,yield_reg);
 618 
 619     __ BIND(dontyield);
 620 
 621     // try to get lock
 622     __ swap(lock_ptr_reg, 0, lock_reg);
 623 
 624     // did we get the lock?
 625     __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
 626     __ br(Assembler::notEqual, true, Assembler::pn, retry);
 627     __ delayed()->add(yield_reg,1,yield_reg);
 628 
 629     // yes, got lock. do the operation here.
 630   }
 631 
 632   void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 633     __ st(lock_reg, lock_ptr_reg, 0); // unlock
 634   }
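
For readers less familiar with the V8 swap-based lock emitted above, here is a minimal C++ sketch of the same acquire/release shape. This is an illustration, not HotSpot code: UNLOCKED, LOCKED, SPIN_COUNT and brief_sleep stand in for StubRoutines::Sparc::unlocked, StubRoutines::Sparc::locked, V8AtomicOperationUnderLockSpinCount and os::naked_sleep.

    #include <atomic>

    static const int UNLOCKED   = 0;   // stand-in for StubRoutines::Sparc::unlocked
    static const int LOCKED     = 1;   // stand-in for StubRoutines::Sparc::locked
    static const int SPIN_COUNT = 20;  // stand-in for V8AtomicOperationUnderLockSpinCount

    static void brief_sleep() { /* stands in for the call to os::naked_sleep() */ }

    // Acquire: swap LOCKED into the lock word until the value swapped out was
    // UNLOCKED; after SPIN_COUNT failed attempts, sleep briefly and reset the
    // counter (the prologue's save / call / restore sequence).
    static void v8_lock(std::atomic<int>& lock_word) {
      int yield = 0;
      for (;;) {
        if (yield >= SPIN_COUNT) { brief_sleep(); yield = 0; }
        if (lock_word.exchange(LOCKED) == UNLOCKED) return;  // the V8 'swap'
        ++yield;
      }
    }

    // Release: the epilogue's single 'st' writes the previously swapped-out
    // (unlocked) value back into the lock word.
    static void v8_unlock(std::atomic<int>& lock_word) {
      lock_word.store(UNLOCKED);
    }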
 635 
 636   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
 637   //
 638   // Arguments :
 639   //
 640   //      exchange_value: O0
 641   //      dest:           O1
 642   //
 643   // Results:
 644   //
 645   //     O0: the value previously stored in dest
 646   //
 647   address generate_atomic_xchg() {
 648     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 649     address start = __ pc();
 650 
 651     if (UseCASForSwap) {
 652       // Use CAS instead of swap, just in case the MP hardware
 653       // prefers to work with just one kind of synch. instruction.
 654       Label retry;
 655       __ BIND(retry);
 656       __ mov(O0, O3);       // scratch copy of exchange value
 657       __ ld(O1, 0, O2);     // observe the previous value
 658       // try to replace O2 with O3
 659       __ cas_under_lock(O1, O2, O3,
 660       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 661       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 662 
 663       __ retl(false);
 664       __ delayed()->mov(O2, O0);  // report previous value to caller
 665 
 666     } else {
 667       if (VM_Version::v9_instructions_work()) {
 668         __ retl(false);
 669         __ delayed()->swap(O1, 0, O0);
 670       } else {
 671         const Register& lock_reg = O2;
 672         const Register& lock_ptr_reg = O3;
 673         const Register& yield_reg = O4;
 674 
 675         Label retry;
 676         Label dontyield;
 677 
 678         generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 679         // got the lock, do the swap
 680         __ swap(O1, 0, O0);
 681 
 682         generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 683         __ retl(false);
 684         __ delayed()->nop();
 685       }
 686     }
 687 
 688     return start;
 689   }
 690 
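Ignoring registers and delay slots, the UseCASForSwap retry loop above behaves like the following sketch; the function name and the use of std::atomic are illustrative, not HotSpot API.

    #include <atomic>
    #include <cstdint>

    // Retry until the CAS succeeds, i.e. until no other thread changed *dest
    // between the load and the compare-and-swap; return the replaced value.
    static int32_t xchg_via_cas(int32_t exchange_value, std::atomic<int32_t>* dest) {
      int32_t observed = dest->load();                                 // the 'ld'
      while (!dest->compare_exchange_weak(observed, exchange_value)) {
        // on failure 'observed' is refreshed with the current value -- retry
      }
      return observed;                                                 // previous value (O0)
    }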
 691 
 692   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 693   //
 694   // Arguments :
 695   //
 696   //      exchange_value: O0
 697   //      dest:           O1
 698   //      compare_value:  O2
 699   //
 700   // Results:
 701   //
 702   //     O0: the value previously stored in dest
 703   //
 704   // Overwrites (v8): O3,O4,O5
 705   //
 706   address generate_atomic_cmpxchg() {
 707     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 708     address start = __ pc();
 709 
 710     // cmpxchg(dest, compare_value, exchange_value)
 711     __ cas_under_lock(O1, O2, O0,
 712       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 713     __ retl(false);
 714     __ delayed()->nop();
 715 
 716     return start;
 717   }
 718 
 719   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 720   //
 721   // Arguments :
 722   //
 723   //      exchange_value: O1:O0
 724   //      dest:           O2
 725   //      compare_value:  O4:O3
 726   //
 727   // Results:
 728   //
 729   //     O1:O0: the value previously stored in dest
 730   //
 731   // This only works on V9, on V8 we don't generate any
 732   // code and just return NULL.
 733   //
 734   // Overwrites: G1,G2,G3
 735   //
 736   address generate_atomic_cmpxchg_long() {
 737     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 738     address start = __ pc();
 739 
 740     if (!VM_Version::supports_cx8())
 741         return NULL;
 742     __ sllx(O0, 32, O0);
 743     __ srl(O1, 0, O1);
 744     __ or3(O0,O1,O0);      // O0 holds 64-bit value from exchange_value
 745     __ sllx(O3, 32, O3);
 746     __ srl(O4, 0, O4);
 747     __ or3(O3,O4,O3);     // O3 holds 64-bit value from compare_value
 748     __ casx(O2, O3, O0);
 749     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 750     __ retl(false);
 751     __ delayed()->srlx(O0, 32, O0);
 752 
 753     return start;
 754   }
 755 
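The sllx/srl/or3 sequences above merely reassemble each 64-bit argument from its two 32-bit register halves (on 32-bit SPARC the more significant word travels in the lower-numbered register of the pair), and the closing srl/srlx pair splits the result back up for the two-register return. A small sketch of that arithmetic, with names of my own choosing:

    #include <cstdint>

    // Pack: (hi << 32) | zero-extended lo  -- the sllx / 'srl ...,0' / or3 triple.
    static int64_t pack64(uint32_t hi, uint32_t lo) {
      return (int64_t)(((uint64_t)hi << 32) | (uint64_t)lo);
    }

    // Unpack for the two-register return convention:
    // 'srl result, 0' yields the low word, 'srlx result, 32' the high word.
    static void unpack64(int64_t v, uint32_t* hi, uint32_t* lo) {
      *lo = (uint32_t)((uint64_t)v & 0xFFFFFFFFu);
      *hi = (uint32_t)((uint64_t)v >> 32);
    }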
 756 
 757   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
 758   //
 759   // Arguments :
 760   //
 761   //      add_value: O0   (e.g., +1 or -1)
 762   //      dest:      O1
 763   //
 764   // Results:
 765   //
 766   //     O0: the new value stored in dest
 767   //
 768   // Overwrites (v9): O3
 769   // Overwrites (v8): O3,O4,O5
 770   //
 771   address generate_atomic_add() {
 772     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 773     address start = __ pc();
 774     __ BIND(_atomic_add_stub);
 775 
 776     if (VM_Version::v9_instructions_work()) {
 777       Label retry;
 778       __ BIND(retry);
 779 
 780       __ lduw(O1, 0, O2);
 781       __ add(O0, O2, O3);
 782       __ cas(O1, O2, O3);
 783       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 784       __ retl(false);
 785       __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 786     } else {
 787       const Register& lock_reg = O2;
 788       const Register& lock_ptr_reg = O3;
 789       const Register& value_reg = O4;
 790       const Register& yield_reg = O5;
 791 
 792       Label retry;
 793       Label dontyield;
 794 
 795       generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 796       // got lock, do the increment
 797       __ ld(O1, 0, value_reg);
 798       __ add(O0, value_reg, value_reg);
 799       __ st(value_reg, O1, 0);
 800 
 801       // %%% only for RMO and PSO
 802       __ membar(Assembler::StoreStore);
 803 
 804       generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 805 
 806       __ retl(false);
 807       __ delayed()->mov(value_reg, O0);
 808     }
 809 
 810     return start;
 811   }
 812   Label _atomic_add_stub;  // called from other stubs
 813 
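Stripped of register detail, the v9 path of generate_atomic_add is the usual load / compute / CAS retry loop; unlike xchg and cmpxchg it returns the new value, which is what the delayed add(O0, O2, O0) produces. A minimal sketch under the same illustrative conventions as above:

    #include <atomic>
    #include <cstdint>

    // Read the current value, CAS in current + add_value, retry on contention;
    // return old + add_value (the new value stored in dest).
    static int32_t add_via_cas(int32_t add_value, std::atomic<int32_t>* dest) {
      int32_t observed = dest->load();                                        // 'lduw'
      while (!dest->compare_exchange_weak(observed, observed + add_value)) {
        // 'observed' was refreshed with the current value -- recompute and retry
      }
      return observed + add_value;                                            // new value (O0)
    }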
 814 
 815   //------------------------------------------------------------------------------------------------------------------------
 816   // The following routine generates a subroutine to throw an asynchronous
 817   // UnknownError when an unsafe access gets a fault that could not be
 818   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 819   //
 820   // Arguments :
 821   //
 822   //      trapping PC:    O7
 823   //
 824   // Results:
 825   //     posts an asynchronous exception, skips the trapping instruction
 826   //
 827 
 828   address generate_handler_for_unsafe_access() {
 829     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 830     address start = __ pc();
 831 
 832     const int preserve_register_words = (64 * 2);
 833     Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
 834 
 835     Register Lthread = L7_thread_cache;
 836     int i;
 837 
 838     __ save_frame(0);
 839     __ mov(G1, L1);
 840     __ mov(G2, L2);
 841     __ mov(G3, L3);
 842     __ mov(G4, L4);
 843     __ mov(G5, L5);
 844     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 845       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
 846     }
 847 
 848     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
 849     BLOCK_COMMENT("call handle_unsafe_access");
 850     __ call(entry_point, relocInfo::runtime_call_type);
 851     __ delayed()->nop();
 852 
 853     __ mov(L1, G1);
 854     __ mov(L2, G2);
 855     __ mov(L3, G3);
 856     __ mov(L4, G4);
 857     __ mov(L5, G5);
 858     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 859       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
 860     }
 861 
 862     __ verify_thread();
 863 
 864     __ jmp(O0, 0);
 865     __ delayed()->restore();
 866 
 867     return start;
 868   }
 869 
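A quick size check on the preserve area above, assuming the usual LP64 values (wordSize == 8, STACK_BIAS == 2047): preserve_register_words is 128, so the area spans 128 * 8 = 1024 bytes ending at the biased frame pointer, while the double-register loop stores at offsets 0, 16, ..., 62 * 8 = 496 into it, well within that space.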
 870 
 871   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
 872   // Arguments :
 873   //
 874   //      ret  : O0, returned
 875   //      icc/xcc: set as O0 (depending on wordSize)
 876   //      sub  : O1, argument, not changed
 877   //      super: O2, argument, not changed
 878   //      raddr: O7, blown by call




 549     __ ret();
 550     __ delayed()->restore();
 551 
 552     return start;
 553   }
 554 
 555 
 556   address generate_stop_subroutine() {
 557     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 558     address start = __ pc();
 559 
 560     __ stop_subroutine();
 561 
 562     return start;
 563   }
 564 
 565   address generate_flush_callers_register_windows() {
 566     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 567     address start = __ pc();
 568 
 569     __ flushw();
 570     __ retl(false);
 571     __ delayed()->add( FP, STACK_BIAS, O0 );
 572     // The returned value must be a stack pointer whose register save area
 573     // is flushed, and will stay flushed while the caller executes.
 574 
 575     return start;
 576   }
 577 
 578   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
 579   //
 580   // Arguments:
 581   //
 582   //      exchange_value: O0
 583   //      dest:           O1
 584   //
 585   // Results:
 586   //
 587   //     O0: the value previously stored in dest
 588   //
 589   address generate_atomic_xchg() {
 590     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 591     address start = __ pc();
 592 
 593     if (UseCASForSwap) {
 594       // Use CAS instead of swap, just in case the MP hardware
 595       // prefers to work with just one kind of synch. instruction.
 596       Label retry;
 597       __ BIND(retry);
 598       __ mov(O0, O3);       // scratch copy of exchange value
 599       __ ld(O1, 0, O2);     // observe the previous value
 600       // try to replace O2 with O3
 601       __ cas(O1, O2, O3);
 602       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 603 
 604       __ retl(false);
 605       __ delayed()->mov(O2, O0);  // report previous value to caller
 606     } else {
 607       __ retl(false);
 608       __ delayed()->swap(O1, 0, O0);
 609     }
 610 
 611     return start;
 612   }
 613 
 614 
 615   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 616   //
 617   // Arguments:
 618   //
 619   //      exchange_value: O0
 620   //      dest:           O1
 621   //      compare_value:  O2
 622   //
 623   // Results:
 624   //
 625   //     O0: the value previously stored in dest
 626   //
 627   address generate_atomic_cmpxchg() {
 628     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 629     address start = __ pc();
 630 
 631     // cmpxchg(dest, compare_value, exchange_value)
 632     __ cas(O1, O2, O0);
 633     __ retl(false);
 634     __ delayed()->nop();
 635 
 636     return start;
 637   }
 638 
 639   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 640   //
 641   // Arguments:
 642   //
 643   //      exchange_value: O1:O0
 644   //      dest:           O2
 645   //      compare_value:  O4:O3
 646   //
 647   // Results:
 648   //
 649   //     O1:O0: the value previously stored in dest
 650   //
 651   // Overwrites: G1,G2,G3
 652   //
 653   address generate_atomic_cmpxchg_long() {
 654     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 655     address start = __ pc();
 656 
 657     __ sllx(O0, 32, O0);
 658     __ srl(O1, 0, O1);
 659     __ or3(O0,O1,O0);      // O0 holds 64-bit value from exchange_value
 660     __ sllx(O3, 32, O3);
 661     __ srl(O4, 0, O4);
 662     __ or3(O3,O4,O3);     // O3 holds 64-bit value from compare_value
 663     __ casx(O2, O3, O0);
 664     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 665     __ retl(false);
 666     __ delayed()->srlx(O0, 32, O0);
 667 
 668     return start;
 669   }
 670 
 671 
 672   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
 673   //
 674   // Arguments:
 675   //
 676   //      add_value: O0   (e.g., +1 or -1)
 677   //      dest:      O1
 678   //
 679   // Results:
 680   //
 681   //     O0: the new value stored in dest
 682   //
 683   // Overwrites: O3
 684   //
 685   address generate_atomic_add() {
 686     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 687     address start = __ pc();
 688     __ BIND(_atomic_add_stub);
 689 
 690     Label retry;
 691     __ BIND(retry);
 692 
 693     __ lduw(O1, 0, O2);
 694     __ add(O0, O2, O3);
 695     __ cas(O1, O2, O3);
 696     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 697     __ retl(false);
 698     __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 699 
 700     return start;
 701   }
 702   Label _atomic_add_stub;  // called from other stubs
 703 
 704 
 705   //------------------------------------------------------------------------------------------------------------------------
 706   // The following routine generates a subroutine to throw an asynchronous
 707   // UnknownError when an unsafe access gets a fault that could not be
 708   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 709   //
 710   // Arguments :
 711   //
 712   //      trapping PC:    O7
 713   //
 714   // Results:
 715   //     posts an asynchronous exception, skips the trapping instruction
 716   //
 717 
 718   address generate_handler_for_unsafe_access() {
 719     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 720     address start = __ pc();
 721 
 722     const int preserve_register_words = (64 * 2);
 723     Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
 724 
 725     Register Lthread = L7_thread_cache;
 726     int i;
 727 
 728     __ save_frame(0);
 729     __ mov(G1, L1);
 730     __ mov(G2, L2);
 731     __ mov(G3, L3);
 732     __ mov(G4, L4);
 733     __ mov(G5, L5);
 734     for (i = 0; i < 64; i += 2) {
 735       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
 736     }
 737 
 738     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
 739     BLOCK_COMMENT("call handle_unsafe_access");
 740     __ call(entry_point, relocInfo::runtime_call_type);
 741     __ delayed()->nop();
 742 
 743     __ mov(L1, G1);
 744     __ mov(L2, G2);
 745     __ mov(L3, G3);
 746     __ mov(L4, G4);
 747     __ mov(L5, G5);
 748     for (i = 0; i < 64; i += 2) {
 749       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
 750     }
 751 
 752     __ verify_thread();
 753 
 754     __ jmp(O0, 0);
 755     __ delayed()->restore();
 756 
 757     return start;
 758   }
 759 
 760 
 761   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
 762   // Arguments :
 763   //
 764   //      ret  : O0, returned
 765   //      icc/xcc: set as O0 (depending on wordSize)
 766   //      sub  : O1, argument, not changed
 767   //      super: O2, argument, not changed
 768   //      raddr: O7, blown by call