/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_BYTES_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_BYTES_BSD_X86_INLINE_HPP

#ifdef __APPLE__
#include <libkern/OSByteOrder.h>
#endif

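// Map each platform's byte-swap primitives to a common bswap_16/32/64 name:
// OSSwapIntNN on macOS (from <libkern/OSByteOrder.h> above), swapNN on
// OpenBSD, bswapNN on NetBSD, and __bswapNN on the remaining BSDs (e.g. FreeBSD).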
#if defined(AMD64)
#  if defined(__APPLE__)
#    define bswap_16(x) OSSwapInt16(x)
#    define bswap_32(x) OSSwapInt32(x)
#    define bswap_64(x) OSSwapInt64(x)
#  elif defined(__OpenBSD__)
#    define bswap_16(x) swap16(x)
#    define bswap_32(x) swap32(x)
#    define bswap_64(x) swap64(x)
#  elif defined(__NetBSD__)
#    define bswap_16(x) bswap16(x)
#    define bswap_32(x) bswap32(x)
#    define bswap_64(x) bswap64(x)
#  else
#    define bswap_16(x) __bswap16(x)
#    define bswap_32(x) __bswap32(x)
#    define bswap_64(x) __bswap64(x)
#  endif
#endif
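// These macros are only defined for AMD64 builds; 32-bit x86 builds take the
// inline-assembly paths in the functions below instead.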

// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2   Bytes::swap_u2(u2 x) {
#ifdef AMD64
  return bswap_16(x);
#else
  u2 ret;
  __asm__ __volatile__ (
    "movw %0, %%ax;"
    "xchg %%al, %%ah;"
    "movw %%ax, %0"
    :"=r" (ret)      // output : register 0 => ret
    :"0"  (x)        // input  : x => register 0
    :"ax", "0"       // clobbered registers
  );
  return ret;
#endif // AMD64
}
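// Illustrative use (not part of this header; 'buf' is a hypothetical byte
// buffer holding Java-ordered, i.e. big-endian, data):
//   u2 be;
//   memcpy(&be, buf, sizeof(be));    // bytes still in Java (big-endian) order
//   u2 native = Bytes::swap_u2(be);  // now in native x86 little-endian order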

inline u4   Bytes::swap_u4(u4 x) {
#ifdef AMD64
  return bswap_32(x);
#else
  u4 ret;
  __asm__ __volatile__ (
    "bswap %0"
    :"=r" (ret)      // output : register 0 => ret
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return ret;
#endif // AMD64
}
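// Both paths reverse all four bytes, e.g. swap_u4(0x11223344) == 0x44332211.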

#ifdef AMD64
inline u8 Bytes::swap_u8(u8 x) {
#ifdef SPARC_WORKS
  // workaround for SunStudio12 CR6615391
  __asm__ __volatile__ (
    "bswapq %0"
    :"=r" (x)        // output : register 0 => x
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return x;
#else
  return bswap_64(x);
#endif
}
#else
// Helper for swap_u8 on 32-bit x86: byte-swaps each 32-bit half and exchanges
// the halves (x is the low half of the original value, y the high half),
// which yields a full 64-bit byte reversal.
inline u8   Bytes::swap_u8_base(u4 x, u4 y) {
  return (((u8)swap_u4(x))<<32) | swap_u4(y);
}

inline u8 Bytes::swap_u8(u8 x) {
  return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1));
}
#endif // !AMD64
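// Either swap_u8 path is a full eight-byte reversal, e.g.
// swap_u8(0x1122334455667788) == 0x8877665544332211.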

#endif // OS_CPU_BSD_X86_BYTES_BSD_X86_INLINE_HPP