--- old/src/cpu/x86/vm/bytes_x86.hpp	2017-04-11 14:40:00.073945462 -0700
+++ new/src/cpu/x86/vm/bytes_x86.hpp	2017-04-11 14:39:59.913942565 -0700
@@ -36,36 +36,85 @@
 #endif // AMD64

 public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
-  // on Sparc.
-  static inline bool is_Java_byte_ordering_different(){ return true; }
-
-  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
-  // (no special code is needed since x86 CPUs can access unaligned data)
-  static inline u2   get_native_u2(address p)         { return *(u2*)p; }
-  static inline u4   get_native_u4(address p)         { return *(u4*)p; }
-  static inline u8   get_native_u8(address p)         { return *(u8*)p; }
-
-  static inline void put_native_u2(address p, u2 x)   { *(u2*)p = x; }
-  static inline void put_native_u4(address p, u4 x)   { *(u4*)p = x; }
-  static inline void put_native_u8(address p, u8 x)   { *(u8*)p = x; }
-
+  template <typename T>
+  static inline T get_native(const void* p) {
+    assert(p != NULL, "null pointer");
+
+    T x;
+
+    if (is_ptr_aligned(p, sizeof(T))) {
+      x = *(T*)p;
+    } else {
+      memcpy(&x, p, sizeof(T));
+    }
+
+    return x;
+  }
+
+  template <typename T>
+  static inline void put_native(void* p, T x) {
+    assert(p != NULL, "null pointer");
+
+    if (is_ptr_aligned(p, sizeof(T))) {
+      *(T*)p = x;
+    } else {
+      memcpy(p, &x, sizeof(T));
+    }
+  }
+
+  static inline u2   get_native_u2(address p)         { return get_native<u2>((void*)p); }
+  static inline u4   get_native_u4(address p)         { return get_native<u4>((void*)p); }
+  static inline u8   get_native_u8(address p)         { return get_native<u8>((void*)p); }
+
+  static inline void put_native_u2(address p, u2 x)   { put_native((void*)p, x); }
+  static inline void put_native_u4(address p, u4 x)   { put_native((void*)p, x); }
+  static inline void put_native_u8(address p, u8 x)   { put_native((void*)p, x); }

   // Efficient reading and writing of unaligned unsigned data in Java
   // byte ordering (i.e. big-endian ordering). Byte-order reversal is
   // needed since x86 CPUs use little-endian format.
-  static inline u2   get_Java_u2(address p)           { return swap_u2(get_native_u2(p)); }
-  static inline u4   get_Java_u4(address p)           { return swap_u4(get_native_u4(p)); }
-  static inline u8   get_Java_u8(address p)           { return swap_u8(get_native_u8(p)); }
-
-  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, swap_u2(x)); }
-  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, swap_u4(x)); }
-  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, swap_u8(x)); }
-
+  template <typename T>
+  static inline T get_Java(const address p) {
+    T x = get_native<T>(p);
+
+    if (Endian::is_Java_byte_ordering_different()) {
+      x = swap(x);
+    }
+
+    return x;
+  }
+
+  template <typename T>
+  static inline void put_Java(address p, T x) {
+    if (Endian::is_Java_byte_ordering_different()) {
+      x = swap(x);
+    }
+
+    put_native(p, x);
+  }
+
+  static inline u2   get_Java_u2(address p)           { return get_Java<u2>(p); }
+  static inline u4   get_Java_u4(address p)           { return get_Java<u4>(p); }
+  static inline u8   get_Java_u8(address p)           { return get_Java<u8>(p); }
+
+  static inline void put_Java_u2(address p, u2 x)     { put_Java(p, x); }
+  static inline void put_Java_u4(address p, u4 x)     { put_Java(p, x); }
+  static inline void put_Java_u8(address p, u8 x)     { put_Java(p, x); }

   // Efficient swapping of byte ordering
+  template <typename T>
+  static T swap(T x) {
+    switch (sizeof(T)) {
+    case sizeof(u1): return x;
+    case sizeof(u2): return swap_u2(x);
+    case sizeof(u4): return swap_u4(x);
+    case sizeof(u8): return swap_u8(x);
+    default:
+      guarantee(false, "invalid size: " SIZE_FORMAT "\n", sizeof(T));
+      return 0;
+    }
+  }
+
   static inline u2   swap_u2(u2 x);                   // compiler-dependent implementation
   static inline u4   swap_u4(u4 x);                   // compiler-dependent implementation
   static inline u8   swap_u8(u8 x);
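For context on the core pattern: the patch replaces the bare unaligned dereferences (*(u2*)p) with an alignment-checked template, since dereferencing a misaligned pointer is undefined behavior that compilers may exploit, while memcpy of a known small size compiles down to the same single load on x86. Below is a minimal standalone sketch of that pattern, assuming nothing from HotSpot; the u4 typedef, is_aligned_to(), and get_native_sketch() are illustrative stand-ins, not identifiers from the patch.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint32_t u4;  // stand-in for HotSpot's u4 typedef

// Stand-in for HotSpot's is_ptr_aligned(); assumes a power-of-two alignment,
// which holds for sizeof(u2)/sizeof(u4)/sizeof(u8).
static inline bool is_aligned_to(const void* p, size_t alignment) {
  return ((uintptr_t)p & (alignment - 1)) == 0;
}

// Same shape as the patched Bytes::get_native<T>: plain dereference when the
// pointer is naturally aligned, memcpy otherwise. On x86 both paths become an
// ordinary load; the memcpy branch keeps the unaligned case well-defined C++.
template <typename T>
static inline T get_native_sketch(const void* p) {
  assert(p != NULL);
  T x;
  if (is_aligned_to(p, sizeof(T))) {
    x = *(const T*)p;
  } else {
    memcpy(&x, p, sizeof(T));
  }
  return x;
}

int main() {
  unsigned char buf[8] = {0};
  u4 v = 0x11223344;
  memcpy(buf + 1, &v, sizeof(v));  // place the payload at a misaligned offset
  printf("0x%08X\n", (unsigned)get_native_sketch<u4>(buf + 1));  // 0x11223344
  return 0;
}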
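The Java-ordering accessors follow the same funneling: read in native order, then byte-swap only when Endian::is_Java_byte_ordering_different() reports a mismatch (always true on little-endian x86). A hedged standalone sketch of that flow, with byteswap_sketch() standing in for the compiler-dependent swap_u2/u4/u8 intrinsics and a runtime probe standing in for the platform-resolved Endian query:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint32_t u4;  // stand-in for HotSpot's u4 typedef

// Stand-in for the swap_u2/u4/u8 intrinsics the patch dispatches to by
// sizeof(T): a generic byte reversal through a temporary buffer.
template <typename T>
static T byteswap_sketch(T x) {
  unsigned char b[sizeof(T)];
  memcpy(b, &x, sizeof(T));
  for (size_t i = 0; i < sizeof(T) / 2; i++) {
    unsigned char tmp = b[i];
    b[i] = b[sizeof(T) - 1 - i];
    b[sizeof(T) - 1 - i] = tmp;
  }
  memcpy(&x, b, sizeof(T));
  return x;
}

// Runtime stand-in for Endian::is_Java_byte_ordering_different(), which the
// real code resolves statically per platform.
static bool java_ordering_differs() {
  const uint16_t probe = 1;
  unsigned char low_first;
  memcpy(&low_first, &probe, 1);
  return low_first == 1;  // low byte stored first => little-endian host
}

// Same shape as the patched Bytes::get_Java<T>: unaligned-safe native read,
// then swap only when host order differs from Java's big-endian order.
template <typename T>
static T get_Java_sketch(const void* p) {
  T x;
  memcpy(&x, p, sizeof(T));
  if (java_ordering_differs()) {
    x = byteswap_sketch(x);
  }
  return x;
}

int main() {
  const unsigned char magic[] = { 0xCA, 0xFE, 0xBA, 0xBE };  // class-file magic, big-endian
  printf("0x%08X\n", (unsigned)get_Java_sketch<u4>(magic));  // 0xCAFEBABE on x86
  return 0;
}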