6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
27 #define OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
28
// No FPU configuration is needed for the Zero interpreter port; this
// hook exists to satisfy the shared os_* interface and is a no-op here.
static void setup_fpu() {}
30
// Returns whether an allocation of 'bytes' is considered feasible on
// this platform.  Declaration only -- defined in the matching .cpp file.
static bool is_allocatable(size_t bytes);
32
// Hook for registering a dynamically generated code cache region with
// the operating system.  Only the 64-bit Windows port does real work
// here; on this platform there is nothing to register.
static bool register_code_area(char *low, char *high) {
  return true;  // no OS-level registration required; report success
}
36
// Atomically copy 64 bits of data
//
// On 32-bit PPC and 31-bit S390 a plain 64-bit assignment would be
// split into two 32-bit memory operations, so a single floating-point
// load/store pair is used to keep the access indivisible.
static void atomic_copy64(volatile void *src, volatile void *dst) {
#if defined(PPC) && !defined(_LP64)
  // lfd/stfd move all 64 bits through one FP register in a single
  // load and a single store.
  double tmp;
  asm volatile ("lfd %0, 0(%1)\n"
                "stfd %0, 0(%2)\n"
                : "=f"(tmp)
                : "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
  // ld/std are the S390 64-bit floating-point load/store instructions.
  // NOTE(review): "=r" makes GCC allocate a GPR whose number is then
  // substituted into an FP instruction; this appears to rely on the
  // GPR/FPR number ranges overlapping -- confirm whether "=f" was the
  // intended constraint.
  double tmp;
  asm volatile ("ld %0, 0(%1)\n"
                "std %0, 0(%2)\n"
                : "=r"(tmp)
                : "a"(src), "a"(dst));
#else
  // Plain copy.  Assumed atomic for naturally aligned 64-bit accesses
  // on _LP64 targets.  NOTE(review): on other 32-bit targets reaching
  // this branch the copy compiles to two 32-bit moves and is NOT
  // atomic -- presumably tolerated or unreachable there; verify.
  *(jlong *) dst = *(jlong *) src;
#endif
}
55
56 #endif // OS_CPU_LINUX_ZERO_VM_OS_LINUX_ZERO_HPP
|
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_BSD_ZERO_VM_OS_BSD_ZERO_HPP
27 #define OS_CPU_BSD_ZERO_VM_OS_BSD_ZERO_HPP
28
// No FPU configuration is needed for the Zero interpreter port; this
// hook exists to satisfy the shared os_* interface and is a no-op here.
static void setup_fpu() {}
30
// Returns whether an allocation of 'bytes' is considered feasible on
// this platform.  Declaration only -- defined in the matching .cpp file.
static bool is_allocatable(size_t bytes);
32
// Hook for registering a dynamically generated code cache region with
// the operating system.  Only the 64-bit Windows port does real work
// here; on this platform there is nothing to register.
static bool register_code_area(char *low, char *high) {
  return true;  // no OS-level registration required; report success
}
36
// Atomically copy 64 bits of data
//
// On 32-bit PPC and 31-bit S390 a plain 64-bit assignment would be
// split into two 32-bit memory operations, so a single floating-point
// load/store pair is used to keep the access indivisible.
static void atomic_copy64(volatile void *src, volatile void *dst) {
#if defined(PPC) && !defined(_LP64)
  // lfd/stfd move all 64 bits through one FP register in a single
  // load and a single store.
  double tmp;
  asm volatile ("lfd %0, 0(%1)\n"
                "stfd %0, 0(%2)\n"
                : "=f"(tmp)
                : "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
  // ld/std are the S390 64-bit floating-point load/store instructions.
  // NOTE(review): "=r" makes GCC allocate a GPR whose number is then
  // substituted into an FP instruction; this appears to rely on the
  // GPR/FPR number ranges overlapping -- confirm whether "=f" was the
  // intended constraint.
  double tmp;
  asm volatile ("ld %0, 0(%1)\n"
                "std %0, 0(%2)\n"
                : "=r"(tmp)
                : "a"(src), "a"(dst));
#else
  // Plain copy.  Assumed atomic for naturally aligned 64-bit accesses
  // on _LP64 targets.  NOTE(review): on other 32-bit targets reaching
  // this branch the copy compiles to two 32-bit moves and is NOT
  // atomic -- presumably tolerated or unreachable there; verify.
  *(jlong *) dst = *(jlong *) src;
#endif
}
55
56 #endif // OS_CPU_BSD_ZERO_VM_OS_BSD_ZERO_HPP
|