13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
27 #define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
28
// Per-thread FPU initialization hook; the Zero port needs no special
// floating-point state, so this is deliberately a no-op.
static void setup_fpu() {
  // Nothing to do.
}
30
// Reports whether an allocation of 'bytes' bytes is feasible on this
// platform. Declaration only — the implementation is not visible in
// this header chunk.
static bool is_allocatable(size_t bytes);
32
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) {
  // Nothing to register on this platform; report success unconditionally.
  return true;
}
36
37 // Atomically copy 64 bits of data
38 static void atomic_copy64(const volatile void *src, volatile void *dst) {
39 #if defined(PPC32) && !defined(__SPE__)
40 double tmp;
41 asm volatile ("lfd %0, %2\n"
42 "stfd %0, %1\n"
43 : "=&f"(tmp), "=Q"(*(volatile double*)dst)
44 : "Q"(*(volatile double*)src));
45 #elif defined(PPC32) && defined(__SPE__)
46 long tmp;
47 asm volatile ("evldd %0, %2\n"
48 "evstdd %0, %1\n"
49 : "=&r"(tmp), "=Q"(*(volatile long*)dst)
50 : "Q"(*(volatile long*)src));
51 #elif defined(S390) && !defined(_LP64)
52 double tmp;
53 asm volatile ("ld %0, %2\n"
54 "std %0, %1\n"
55 : "=&f"(tmp), "=Q"(*(volatile double*)dst)
56 : "Q"(*(volatile double*)src));
|
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
27 #define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
28
// FPU setup hook; intentionally empty — the Zero port requires no
// floating-point unit configuration.
static void setup_fpu() {}
30
// Reports whether an allocation of 'bytes' bytes is feasible on this
// platform. Declaration only — the implementation is not visible in
// this header chunk.
static bool is_allocatable(size_t bytes);
32
33 // Atomically copy 64 bits of data
34 static void atomic_copy64(const volatile void *src, volatile void *dst) {
35 #if defined(PPC32) && !defined(__SPE__)
36 double tmp;
37 asm volatile ("lfd %0, %2\n"
38 "stfd %0, %1\n"
39 : "=&f"(tmp), "=Q"(*(volatile double*)dst)
40 : "Q"(*(volatile double*)src));
41 #elif defined(PPC32) && defined(__SPE__)
42 long tmp;
43 asm volatile ("evldd %0, %2\n"
44 "evstdd %0, %1\n"
45 : "=&r"(tmp), "=Q"(*(volatile long*)dst)
46 : "Q"(*(volatile long*)src));
47 #elif defined(S390) && !defined(_LP64)
48 double tmp;
49 asm volatile ("ld %0, %2\n"
50 "std %0, %1\n"
51 : "=&f"(tmp), "=Q"(*(volatile double*)dst)
52 : "Q"(*(volatile double*)src));
|