src/cpu/sparc/vm/nativeInst_sparc.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/cpu/sparc/vm

src/cpu/sparc/vm/nativeInst_sparc.cpp

Print this page




 673 
 674   while (--idx) {
 675     nm = nativeMovRegMem_at( nm->next_instruction_address() );
 676     nm->print();
 677     for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
 678       nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
 679       assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
 680              "check unit test");
 681       nm->print();
 682     }
 683     nm->add_offset_in_bytes( low10(0xbb) * wordSize );
 684     nm->print();
 685   }
 686 
 687   VM_Version::revert();
 688 #endif // ASSERT
 689 }
 690 
 691 // End code for unit testing implementation of NativeMovRegMem class
 692 
 693 //--------------------------------------------------------------------------------
 694 
 695 
 696 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
 697   Untested("copy_instruction_to");
 698   int instruction_size = next_instruction_address() - instruction_address();
 699   for (int i = 0; i < instruction_size; i += wordSize) {
 700     *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
 701   }
 702 }
 703 
 704 
// Check that the code at this address really is a NativeMovRegMemPatching
// pattern: either an immediate-offset ld*/st* instruction, or a
// sethi/nop/add sequence followed by a register-offset ld*/st*.
// Calls fatal() when the pattern does not match.
void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  // The sethi-based form must line up with NativeMovConstReg's layout.
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // First try the immediate-offset form at word 0: a ldst_op instruction
  // with the immediate bit set whose op3 selects an integer ld/st mask, or
  // (for op3 >= op3_ldst_int_limit) a floating-point ld/st mask.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    // Not the immediate form: the ld/st should then sit at ldst_offset,
    // after the sethi/nop/add, using the sethi destination as its rs2.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): unlike the immediate-form check above, the negation
    // here covers only is_op(i1, ...) rather than the whole conjunction
    // (!(is_op(...) && rd == ... && mask-match)); presumably the latter
    // was intended — confirm against NativeMovRegMem::verify.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
 730 
 731 
 732 void NativeMovRegMemPatching::print() {
 733   if (is_immediate()) {
 734     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), offset());
 735   } else {
 736     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
 737   }
 738 }
 739 
 740 
// Code for unit testing implementation of NativeMovRegMemPatching class
// Emits one instance of every ld*/st* form the decoder must handle —
// an immediate-offset variant and a sethi/nop/add register-offset variant
// for each access width — then walks the generated code re-decoding,
// patching and printing each one.  Debug builds only.
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;   // counts the testable instructions emitted below
  uint idx1;
  // Offsets to patch into each instruction; the immediate form only
  // accepts the low 10 bits (see the low10() calls in the loop).
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Permit all instruction forms regardless of the detected CPU features;
  // reverted at the end of the test.
  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  // Loads: for each width, an immediate-offset form, then a sethi/nop/add
  // sequence materializing the address in I3 followed by the
  // register-offset form.  Each idx++ marks one decodable pattern.
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx(  G5, I3, G4 ); idx++;
  a->ldd(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd(  G5, I3, G4 ); idx++;
  a->ldf(  FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf(  FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same pattern as the loads above.
  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Decode and exercise the first pattern outside the loop ...
  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // ... then the remaining idx-1 patterns: for each, patch in every test
  // offset (clamped to low10 for the immediate form), check the decoder
  // reads it back, and bump the offset once more via add_offset_in_bytes.
  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class
 835 
 836 
 837 //--------------------------------------------------------------------------------
 838 
 839 
 840 void NativeJump::verify() {
 841   NativeInstruction::verify();
 842   int i0 = long_at(sethi_offset);
 843   int i1 = long_at(jmpl_offset);
 844   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
 845   // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
 846   Register rd = inv_rd(i0);
 847 #ifndef _LP64
 848   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
 849         (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
 850         (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
 851         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
 852         rd == inv_rs1(i1))) {
 853     fatal("not a jump_to instruction");
 854   }
 855 #else




 673 
 674   while (--idx) {
 675     nm = nativeMovRegMem_at( nm->next_instruction_address() );
 676     nm->print();
 677     for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
 678       nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
 679       assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
 680              "check unit test");
 681       nm->print();
 682     }
 683     nm->add_offset_in_bytes( low10(0xbb) * wordSize );
 684     nm->print();
 685   }
 686 
 687   VM_Version::revert();
 688 #endif // ASSERT
 689 }
 690 
 691 // End code for unit testing implementation of NativeMovRegMem class
 692 















































































































































 693 
 694 //--------------------------------------------------------------------------------
 695 
 696 
 697 void NativeJump::verify() {
 698   NativeInstruction::verify();
 699   int i0 = long_at(sethi_offset);
 700   int i1 = long_at(jmpl_offset);
 701   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
 702   // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
 703   Register rd = inv_rd(i0);
 704 #ifndef _LP64
 705   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
 706         (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
 707         (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
 708         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
 709         rd == inv_rs1(i1))) {
 710     fatal("not a jump_to instruction");
 711   }
 712 #else


src/cpu/sparc/vm/nativeInst_sparc.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File