src/os_cpu/linux_zero/vm/os_linux_zero.hpp

rev 4738 : Clean up PPC defines.

Reorganize the PPC defines and distinguish PPC, PPC64 and PPC32.
PPC should guard code needed on PPC regardless of word size;
PPC32 and PPC64 should guard code needed only in the 32-bit or
the 64-bit port, respectively.
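
To make the convention concrete, here is a minimal sketch; the declarations
are hypothetical and exist only to show which guard each kind of code
belongs under:

    #ifdef PPC
    // Needed on every PPC target, 32-bit and 64-bit alike
    // (hypothetical declaration, for illustration only).
    void flush_ppc_icache(char *start, int bytes);
    #endif

    #ifdef PPC64
    // Needed only in the 64-bit port
    // (hypothetical declaration, for illustration only).
    void patch_64bit_toc_entry(char *call_site);
    #endif

    #ifdef PPC32
    // Needed only in the 32-bit port
    // (hypothetical declaration, for illustration only).
    void emit_32bit_branch_stub(char *target);
    #endif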


Old version (before the change):

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
  27 #define OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
  28 
  29   static void setup_fpu() {}
  30 
  31   static bool is_allocatable(size_t bytes);
  32 
  33   // Used to register dynamic code cache area with the OS
  34   // Note: Currently only used in 64 bit Windows implementations
  35   static bool register_code_area(char *low, char *high) { return true; }
  36 
  37   // Atomically copy 64 bits of data
  38   static void atomic_copy64(volatile void *src, volatile void *dst) {
  39 #if defined(PPC) && !defined(_LP64)
  40     double tmp;
  41     asm volatile ("lfd  %0, 0(%1)\n"
  42                   "stfd %0, 0(%2)\n"
  43                   : "=f"(tmp)
  44                   : "b"(src), "b"(dst));
  45 #elif defined(S390) && !defined(_LP64)
  46     double tmp;
  47     asm volatile ("ld  %0, 0(%1)\n"
  48                   "std %0, 0(%2)\n"
  49                   : "=f"(tmp)
  50                   : "a"(src), "a"(dst));
  51 #else
  52     *(jlong *) dst = *(jlong *) src;
  53 #endif
  54   }
  55 
  56 #endif // OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP


New version (after the change):

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
  27 #define OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
  28 
  29   static void setup_fpu() {}
  30 
  31   static bool is_allocatable(size_t bytes);
  32 
  33   // Used to register dynamic code cache area with the OS
  34   // Note: Currently only used in 64 bit Windows implementations
  35   static bool register_code_area(char *low, char *high) { return true; }
  36 
  37   // Atomically copy 64 bits of data
  38   static void atomic_copy64(volatile void *src, volatile void *dst) {
  39 #if defined(PPC32)
  40     double tmp;
  41     asm volatile ("lfd  %0, 0(%1)\n"
  42                   "stfd %0, 0(%2)\n"
  43                   : "=f"(tmp)
  44                   : "b"(src), "b"(dst));
  45 #elif defined(S390) && !defined(_LP64)
  46     double tmp;
  47     asm volatile ("ld  %0, 0(%1)\n"
  48                   "std %0, 0(%2)\n"
  49                   : "=f"(tmp)
  50                   : "a"(src), "a"(dst));
  51 #else
  52     *(jlong *) dst = *(jlong *) src;
  53 #endif
  54   }
  55 
  56 #endif // OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
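
For context, a minimal usage sketch follows (it is not part of this webrev).
It assumes the usual arrangement in which this header is textually included
into the os class, so the helper is reachable as os::atomic_copy64; the
publish/snapshot helpers and the shared variable are hypothetical.  The
reason for the lfd/stfd and ld/std paths above is that on 32-bit PPC and
S390 a plain 64-bit assignment may be split into two 32-bit memory
accesses, while a floating-point load/store moves all 64 bits in a single
instruction, so a concurrent reader never observes a torn value.

    #include "runtime/os.hpp"   // brings in os::atomic_copy64 on a Zero build

    // Hypothetical shared slot written by one thread and read by another.
    static volatile jlong _shared_value = 0;

    // Writer side: copy all 64 bits in one shot (src first, dst second).
    static void publish(jlong v) {
      os::atomic_copy64(&v, &_shared_value);
    }

    // Reader side: take a consistent 64-bit snapshot.
    static jlong snapshot() {
      jlong tmp;
      os::atomic_copy64(&_shared_value, &tmp);
      return tmp;
    }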