/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP

  static void setup_fpu() {}

  static bool is_allocatable(size_t bytes);

  // Atomically copy 64 bits of data
  static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32) && !defined(__SPE__)
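    // Classic PPC32 has no atomic 64-bit integer access, so go through a
    // floating-point register: lfd/stfd move the whole doubleword in a
    // single memory access.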
    double tmp;
    asm volatile ("lfd  %0, %2\n"
                  "stfd %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
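    // e500 (SPE) cores lack the classic FPU; evldd/evstdd instead move the
    // doubleword through the 64-bit SPE extension of a general-purpose
    // register, again as a single access.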
    long tmp;
    asm volatile ("evldd  %0, %2\n"
                  "evstdd %0, %1\n"
                  : "=&r"(tmp), "=Q"(*(volatile long*)dst)
                  : "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
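    // On 31-bit s390, ld/std through a floating-point register likewise
    // make the copy a single 64-bit memory access.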
    double tmp;
    asm volatile ("ld  %0, %2\n"
                  "std %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
    // Note that an ldrexd + clrex combination is only needed for
    // correctness at the OS level (context switches). In this
    // case, clrex *may* be beneficial for performance. For now,
    // don't bother with clrex as this is Zero.
    jlong tmp;
    asm volatile ("ldrexd  %0, [%1]\n"
                  : "=r"(tmp)
                  : "r"(src), "m"(src));
    *(jlong *) dst = tmp;
#else
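    // Plain copy. On 64-bit targets an aligned jlong load/store is a
    // single, naturally atomic access; any remaining 32-bit targets fall
    // through here as well and get only a plain copy.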
    *(jlong *) dst = *(const jlong *) src;
#endif
  }
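
  // A minimal usage sketch (hypothetical caller, not part of this header):
  // copying a 64-bit Java value without tearing on a 32-bit platform.
  //
  //   volatile jlong field;            // e.g. a 64-bit field in the heap
  //   jlong local;
  //   atomic_copy64(&field, &local);   // atomic 64-bit read
  //   atomic_copy64(&local, &field);   // atomic 64-bit write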

#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP