/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_BSD_VM_OS_BSD_HPP
#define OS_BSD_VM_OS_BSD_HPP

// Bsd_OS defines the interface to Bsd operating systems

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

class Bsd {
  friend class os;

  // For signal-chaining
  static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
                                        // __sigaction(), signal() is loaded
  static struct sigaction *(*get_signal_action)(int);
  static struct sigaction *get_preinstalled_handler(int);
  static void save_preinstalled_handler(int, struct sigaction&);

  static void check_signal_handler(int sig);

#ifdef __APPLE__
  // mach_absolute_time
  static mach_timebase_info_data_t _timebase_info;
  static volatile uint64_t         _max_abstime;
#else
  static int (*_clock_gettime)(clockid_t, struct timespec *);
#endif

  static GrowableArray<int>* _cpu_to_node;

 protected:

  static julong _physical_memory;
  static pthread_t _main_thread;
  static int _page_size;

  static julong available_memory();
  static julong physical_memory() { return _physical_memory; }
  static void initialize_system_info();

  static void rebuild_cpu_to_node_map();
  static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }

  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);

 public:

  static void init_thread_fpu_state();
  static pthread_t main_thread(void) { return _main_thread; }

  static void hotspot_sigmask(Thread* thread);

  static pid_t gettid();

  static int page_size(void)         { return _page_size; }
  static void set_page_size(int val) { _page_size = val; }

  static address   ucontext_get_pc(const ucontext_t* uc);
  static void      ucontext_set_pc(ucontext_t* uc, address pc);
  static intptr_t* ucontext_get_sp(const ucontext_t* uc);
  static intptr_t* ucontext_get_fp(const ucontext_t* uc);

  // For Analyzer Forte AsyncGetCallTrace profiling support:
  //
  // This interface should be declared in os_bsd_i486.hpp, but
  // that file provides extensions to the os class and not the
  // Bsd class.
  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
                                              intptr_t** ret_sp, intptr_t** ret_fp);

  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);

  // This boolean allows users to forward their own non-matching signals
  // to JVM_handle_bsd_signal, harmlessly.
  static bool signal_handlers_are_installed;

  static int get_our_sigflags(int);
  static void set_our_sigflags(int, int);
  static void signal_sets_init();
  static void install_signal_handlers();
  static void set_signal_handler(int, bool);

  static sigset_t* unblocked_signals();
  static sigset_t* vm_signals();

  // For signal-chaining
  static struct sigaction *get_chained_signal_action(int sig);
  static bool chained_handler(int sig, siginfo_t* siginfo, void* context);

  // Real-time clock functions
  static void clock_init(void);

  // Stack repair handling

  // none present

 private:
  typedef int (*sched_getcpu_func_t)(void);
  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
  typedef int (*numa_max_node_func_t)(void);
  typedef int (*numa_available_func_t)(void);
  typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
  typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);

  static sched_getcpu_func_t _sched_getcpu;
  static numa_node_to_cpus_func_t _numa_node_to_cpus;
  static numa_max_node_func_t _numa_max_node;
  static numa_available_func_t _numa_available;
  static numa_tonode_memory_func_t _numa_tonode_memory;
  static numa_interleave_memory_func_t _numa_interleave_memory;
  static unsigned long* _numa_all_nodes;

  static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
  static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
  static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
  static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
  static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
  static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
  static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }

 public:
  static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
  static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
    return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
  }
  static int numa_max_node()  { return _numa_max_node != NULL ? _numa_max_node() : -1; }
  static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
  static int numa_tonode_memory(void *start, size_t size, int node) {
    return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
  }
  static void numa_interleave_memory(void *start, size_t size) {
    if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
      _numa_interleave_memory(start, size, _numa_all_nodes);
    }
  }
  static int get_node_by_cpu(int cpu_id);
};

#endif // OS_BSD_VM_OS_BSD_HPP
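
// Usage sketch (illustrative only, not part of the original header): the NUMA
// wrappers above dispatch through function pointers that default to NULL, so
// each call degrades to a -1 return value or a no-op when the underlying entry
// point was never resolved on this platform, and callers need not guard against
// missing support themselves. The variable names below (heap_base, heap_bytes)
// are hypothetical and only show the intended calling pattern:
//
//   // Current CPU id, or -1 if sched_getcpu() is unavailable.
//   int cpu = os::Bsd::sched_getcpu();
//
//   // Map the CPU back to a node id (only meaningful when cpu >= 0).
//   int node = (cpu >= 0) ? os::Bsd::get_node_by_cpu(cpu) : -1;
//
//   // Silently a no-op when interleaving support was not resolved.
//   os::Bsd::numa_interleave_memory(heap_base, heap_bytes);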