src/cpu/x86/vm/bytes_x86.hpp

Old version of the excerpt (the listing starts at line 26 of the file and is cut off inside get_Java; the alignment check is spelled is_ptr_aligned):

#define CPU_X86_VM_BYTES_X86_HPP

#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

class Bytes: AllStatic {
 private:
#ifndef AMD64
  // Helper function for swap_u8
  static inline u8   swap_u8_base(u4 x, u4 y);        // compiler-dependent implementation
#endif // AMD64

 public:
  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
  template <typename T>
  static inline T get_native(const void* p) {
    assert(p != NULL, "null pointer");

    T x;

    if (is_ptr_aligned(p, sizeof(T))) {
      x = *(T*)p;
    } else {
      memcpy(&x, p, sizeof(T));
    }

    return x;
  }

  template <typename T>
  static inline void put_native(void* p, T x) {
    assert(p != NULL, "null pointer");

    if (is_ptr_aligned(p, sizeof(T))) {
      *(T*)p = x;
    } else {
      memcpy(p, &x, sizeof(T));
    }
  }

  static inline u2   get_native_u2(address p)         { return get_native<u2>((void*)p); }
  static inline u4   get_native_u4(address p)         { return get_native<u4>((void*)p); }
  static inline u8   get_native_u8(address p)         { return get_native<u8>((void*)p); }
  static inline void put_native_u2(address p, u2 x)   { put_native<u2>((void*)p, x); }
  static inline void put_native_u4(address p, u4 x)   { put_native<u4>((void*)p, x); }
  static inline void put_native_u8(address p, u8 x)   { put_native<u8>((void*)p, x); }

  // Efficient reading and writing of unaligned unsigned data in Java
  // byte ordering (i.e. big-endian ordering). Byte-order reversal is
  // needed since x86 CPUs use little-endian format.
  template <typename T>
  static inline T get_Java(const address p) {
    T x = get_native<T>(p);




New version of the same excerpt (identical except that the alignment check is now spelled is_aligned):

#define CPU_X86_VM_BYTES_X86_HPP

#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

class Bytes: AllStatic {
 private:
#ifndef AMD64
  // Helper function for swap_u8
  static inline u8   swap_u8_base(u4 x, u4 y);        // compiler-dependent implementation
#endif // AMD64

 public:
  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
  template <typename T>
  static inline T get_native(const void* p) {
    assert(p != NULL, "null pointer");

    T x;

    if (is_aligned(p, sizeof(T))) {
      x = *(T*)p;
    } else {
      memcpy(&x, p, sizeof(T));
    }

    return x;
  }

  template <typename T>
  static inline void put_native(void* p, T x) {
    assert(p != NULL, "null pointer");

    if (is_aligned(p, sizeof(T))) {
      *(T*)p = x;
    } else {
      memcpy(p, &x, sizeof(T));
    }
  }

  static inline u2   get_native_u2(address p)         { return get_native<u2>((void*)p); }
  static inline u4   get_native_u4(address p)         { return get_native<u4>((void*)p); }
  static inline u8   get_native_u8(address p)         { return get_native<u8>((void*)p); }
  static inline void put_native_u2(address p, u2 x)   { put_native<u2>((void*)p, x); }
  static inline void put_native_u4(address p, u4 x)   { put_native<u4>((void*)p, x); }
  static inline void put_native_u8(address p, u8 x)   { put_native<u8>((void*)p, x); }

  // Efficient reading and writing of unaligned unsigned data in Java
  // byte ordering (i.e. big-endian ordering). Byte-order reversal is
  // needed since x86 CPUs use little-endian format.
  template <typename T>
  static inline T get_Java(const address p) {
    T x = get_native<T>(p);

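The excerpt is cut off inside get_Java, so the part the byte-order comment refers to is not visible here. As a hedged guess at the rest of the pattern only, and not the actual contents of bytes_x86.hpp: a Java-order (big-endian) read on little-endian x86 amounts to the native unaligned read followed by a byte-order reversal of the loaded value. The helper names bswap_u4 and get_java_u4_sketch below are invented for this sketch.

// Hedged sketch: big-endian (Java-order) read of a 4-byte value on a
// little-endian machine, done as a native load plus a byte swap.
#include <cstdint>
#include <cstdio>
#include <cstring>

static inline std::uint32_t bswap_u4(std::uint32_t x) {
  return ((x & 0x000000ffu) << 24) |
         ((x & 0x0000ff00u) <<  8) |
         ((x & 0x00ff0000u) >>  8) |
         ((x & 0xff000000u) >> 24);
}

// Reads a big-endian u4 from a possibly unaligned pointer.
static inline std::uint32_t get_java_u4_sketch(const unsigned char* p) {
  std::uint32_t x;
  std::memcpy(&x, p, sizeof(x));   // native-order load (little-endian on x86)
  return bswap_u4(x);              // reverse bytes to recover the big-endian value
}

int main() {
  // The big-endian encoding of 0x01020304.
  const unsigned char be[4] = {0x01, 0x02, 0x03, 0x04};
  std::printf("0x%08x\n", (unsigned) get_java_u4_sketch(be));  // 0x01020304 on x86
  return 0;
}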
