  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst);

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}


void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
    xor3(tmp, -1, tmp);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
    ldub(Lbcp, bcp_offset, tmp);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}
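
// Illustrative sketch only, not part of the build: the byte-by-byte tail of
// get_4_byte_integer_at_bcp above assembles a big-endian 32-bit value from an
// unaligned bytecode pointer. Assuming the preceding (not shown) lines load
// the bytes at bcp_offset + 3 and bcp_offset + 2, the equivalent in plain
// C++ (with <cstdint> types) would be roughly:
static inline int32_t be_u4_at_sketch(const uint8_t* bcp, int bcp_offset) {
  uint32_t r = ((uint32_t)bcp[bcp_offset + 0] << 24)   // ldsb/ldub ... sll(24)
             | ((uint32_t)bcp[bcp_offset + 1] << 16)   // ldub ... sll(16) ... or3
             | ((uint32_t)bcp[bcp_offset + 2] <<  8)
             |  (uint32_t)bcp[bcp_offset + 3];
  return (int32_t)r;  // on _LP64 the ldsb keeps the result sign-extended in the 64-bit register
}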
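
// Illustrative sketch only: the assert in get_cache_index_at_bcp pins the
// secondary-index encoding down to a bitwise NOT, which is why
// xor3(tmp, -1, tmp) recovers the plain index (x ^ -1 == ~x):
static inline int decode_secondary_index_sketch(int encoded) {
  return ~encoded;  // e.g. decode_secondary_index_sketch(~123) == 123
}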
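
// Illustrative sketch only: get_cache_and_index_at_bcp ends in a
// shift-and-add address computation. Because each ConstantPoolCacheEntry
// occupies a power-of-two number of bytes, the sll/add pair is ordinary
// array indexing (names below are hypothetical, not the HotSpot API):
static inline uintptr_t cache_entry_addr_sketch(uintptr_t cpool_cache_base,
                                                uintptr_t index,
                                                int       log2_entry_bytes) {
  return cpool_cache_base + (index << log2_entry_bytes);
}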