#endif // AMD64
}

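// Reverse the byte order of a 32-bit value, e.g. 0x01020304 -> 0x04030201.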
inline u4 Bytes::swap_u4(u4 x) {
#ifdef AMD64
  return bswap_32(x);
#else
  u4 ret;
  // The "0" matching constraint places x in the same register as ret, and
  // that register is already listed as an output, so no clobber is needed.
  __asm__ __volatile__ (
    "bswap %0"
    :"=r" (ret)    // output : register 0 => ret
    :"0"  (x)      // input  : x => register 0
  );
  return ret;
#endif // AMD64
}

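// Reverse the byte order of a 64-bit value. On AMD64 a single 64-bit byte
// swap (BSWAPQ) suffices; on 32-bit x86 the two halves are swapped with
// swap_u4 and exchanged.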
#ifdef AMD64
inline u8 Bytes::swap_u8(u8 x) {
#ifdef SPARC_WORKS
  // Workaround for SunStudio12 CR6615391: emit BSWAPQ directly instead of
  // calling bswap_64.
  __asm__ __volatile__ (
    "bswapq %0"
    :"=r" (x)    // output : register 0 => x
    :"0"  (x)    // input  : x => register 0
  );
  return x;
#else
  return bswap_64(x);
#endif
}
#else
// Helper function for swap_u8
inline u8 Bytes::swap_u8_base(u4 x, u4 y) {
  return (((u8)swap_u4(x)) << 32) | swap_u4(y);
}

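// x86 is little-endian: the low half of x sits at the lower address, so
// swapping each half and exchanging them reverses all eight bytes.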
inline u8 Bytes::swap_u8(u8 x) {
  return swap_u8_base(*(u4*)&x, *(((u4*)&x) + 1));
}
#endif // !AMD64

#endif // OS_CPU_LINUX_X86_BYTES_LINUX_X86_INLINE_HPP