#endif // AMD64
}

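// Reverse the byte order of a 32-bit value, e.g.
// swap_u4(0x01020304) returns 0x04030201.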
inline u4 Bytes::swap_u4(u4 x) {
#ifdef AMD64
  return bswap_32(x);
#else
  u4 ret;
  __asm__ __volatile__ (
    "bswap %0"
    :"=r" (ret)      // output : register 0 => ret
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return ret;
#endif // AMD64
}

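// Reverse the byte order of a 64-bit value. On AMD64 all eight
// bytes are reversed at once, either via bswap_64() or, for
// SunStudio, via an inline BSWAPQ.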
#ifdef AMD64
inline u8 Bytes::swap_u8(u8 x) {
#ifdef SPARC_WORKS
  // workaround for SunStudio12 CR6615391
  __asm__ __volatile__ (
    "bswapq %0"
    :"=r" (x)        // output : register 0 => x
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return x;
#else
  return bswap_64(x);
#endif
}
#else
// Helper function for swap_u8: byte-swap each 32-bit half with
// swap_u4, then exchange the halves so the swapped low word
// becomes the high word of the result.
inline u8 Bytes::swap_u8_base(u4 x, u4 y) {
  return (((u8)swap_u4(x))<<32) | swap_u4(y);
}

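// On 32-bit x86, view x as two little-endian 32-bit halves and
// hand them to swap_u8_base; e.g. swap_u8(0x0102030405060708)
// splits into 0x05060708 (low) and 0x01020304 (high) and yields
// 0x0807060504030201.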
inline u8 Bytes::swap_u8(u8 x) {
  return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1));
}
#endif // !AMD64

#endif // OS_CPU_BSD_X86_BYTES_BSD_X86_INLINE_HPP