97 // that file provides extensions to the os class and not the
98 // Bsd class.
// Recover the program counter (and, through the out-parameters, the stack
// pointer and frame pointer) from a signal-handler ucontext for 'thread'.
99 static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
100 intptr_t** ret_sp, intptr_t** ret_fp);
101
// Build in *fr the frame active when the thread banged the stack guard zone.
// NOTE(review): semantics inferred from the name — confirm against the .cpp.
102 static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
103
104 // This boolean allows users to forward their own non-matching signals
105 // to JVM_handle_bsd_signal, harmlessly.
106 static bool signal_handlers_are_installed;
107
// Accessors for the sa_flags the VM recorded/installed for a signal number.
108 static int get_our_sigflags(int);
109 static void set_our_sigflags(int, int);
// One-time initialization of the VM's signal sets and signal handlers.
110 static void signal_sets_init();
111 static void install_signal_handlers();
// Install the VM handler for one signal; the meaning of the bool flag is
// defined in the .cpp — TODO confirm (likely "set vs. restore").
112 static void set_signal_handler(int, bool);
// Presumably true when the current disposition of 'sig' is "ignored" —
// verify in the implementation.
113 static bool is_sig_ignored(int sig);
114
// Signal sets maintained by the VM (populated by signal_sets_init()).
115 static sigset_t* unblocked_signals();
116 static sigset_t* vm_signals();
117 static sigset_t* allowdebug_blocked_signals();
118
119 // For signal-chaining
// Return the application's previously-installed action for 'sig' (if any),
// and dispatch to it; used to chain to pre-existing handlers.
120 static struct sigaction *get_chained_signal_action(int sig);
121 static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
122
123 // Real-time clock functions
124 static void clock_init(void);
125
126 // Stack repair handling
127
128 // none present
129
130 private:
// Function-pointer typedefs for NUMA entry points (libnuma-style) that are
// resolved dynamically, so the VM still runs when they are unavailable —
// see the NULL checks in the public wrappers below.
131 typedef int (*sched_getcpu_func_t)(void);
132 typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
133 typedef int (*numa_max_node_func_t)(void);
134 typedef int (*numa_available_func_t)(void);
135 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
136 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
137
// Setters used during symbol resolution; the backing static fields are
// declared on lines elided from this view (original 138-148).
149 static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
150 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
151 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
152 static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
153 public:
// Thin wrappers over the resolved NUMA functions; each returns -1 when the
// corresponding symbol was not resolved (function pointer still NULL).
154 static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
155 static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
156 return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
157 }
158 static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
159 static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
160 static int numa_tonode_memory(void *start, size_t size, int node) {
161 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
162 }
// Interleave [start, start+size) across the nodes in _numa_all_nodes.
// Deliberately a silent no-op unless both the function pointer and the
// node mask were successfully resolved.
163 static void numa_interleave_memory(void *start, size_t size) {
164 if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
165 _numa_interleave_memory(start, size, _numa_all_nodes);
166 }
167 }
168 static int get_node_by_cpu(int cpu_id);
169 };
170
171
// Low-level one-shot event used for thread parking. Heap-allocated
// (CHeapObj) and, per the dtor below, never legitimately destroyed.
// Member ORDER is deliberate: the padding doubles exist to isolate the
// mutex/condvar on their own cache line — do not reorder fields.
172 class PlatformEvent : public CHeapObj<mtInternal> {
173 private:
174 double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
// Event/park state; the exact value protocol is implemented by park()/
// unpark() in the .cpp and is not visible here.
175 volatile int _Event;
176 volatile int _nParked; // presumably a count of parked threads — see .cpp
177 pthread_mutex_t _mutex[1]; // arrays of 1 so the bare name decays to a pointer
178 pthread_cond_t _cond[1];
179 double PostPad[2]; // trailing pad, same cache-line motivation as CachePad
180 Thread * _Assoc; // associated thread, set via SetAssociation()
181
182 public: // TODO-FIXME: make dtor private
// Destruction is a fatal invariant failure: instances are immortal.
183 ~PlatformEvent() { guarantee(0, "invariant"); }
184
185 public:
// Initialize the condvar/mutex pair and clear all event state.
186 PlatformEvent() {
187 int status;
188 status = pthread_cond_init(_cond, NULL);
189 assert_status(status == 0, status, "cond_init");
190 status = pthread_mutex_init(_mutex, NULL);
191 assert_status(status == 0, status, "mutex_init");
192 _Event = 0;
193 _nParked = 0;
194 _Assoc = NULL;
195 }
196
197 // Use caution with reset() and fired() -- they may require MEMBARs
198 void reset() { _Event = 0; }
199 int fired() { return _Event; }
// Park/unpark protocol (untimed and millisecond-timed wait); bodies are
// defined out-of-line in the platform .cpp.
200 void park();
201 void unpark();
202 int park(jlong millis);
203 void SetAssociation(Thread * a) { _Assoc = a; }
204 };
205
// Platform half of the JSR166 Parker: just the pthread mutex/condvar pair,
// initialized here; the parking logic itself lives in the .cpp subclass/user.
206 class PlatformParker : public CHeapObj<mtInternal> {
207 protected:
// Arrays of 1 so the bare names decay to pointers for the pthread_* calls.
208 pthread_mutex_t _mutex[1];
209 pthread_cond_t _cond[1];
210
211 public: // TODO-FIXME: make dtor private
// Never destroyed in practice: the dtor fires a fatal guarantee.
212 ~PlatformParker() { guarantee(0, "invariant"); }
213
214 public:
// Initialize the condvar and mutex with default attributes; any failure
// is fatal via assert_status.
215 PlatformParker() {
216 int status;
217 status = pthread_cond_init(_cond, NULL);
218 assert_status(status == 0, status, "cond_init");
219 status = pthread_mutex_init(_mutex, NULL);
220 assert_status(status == 0, status, "mutex_init");
221 }
222 };
223
224 #endif // OS_BSD_VM_OS_BSD_HPP
|
97 // that file provides extensions to the os class and not the
98 // Bsd class.
// Recover the program counter (and, through the out-parameters, the stack
// pointer and frame pointer) from a signal-handler ucontext for 'thread'.
99 static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
100 intptr_t** ret_sp, intptr_t** ret_fp);
101
// Build in *fr the frame active when the thread banged the stack guard zone.
// NOTE(review): semantics inferred from the name — confirm against the .cpp.
102 static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
103
104 // This boolean allows users to forward their own non-matching signals
105 // to JVM_handle_bsd_signal, harmlessly.
106 static bool signal_handlers_are_installed;
107
// Accessors for the sa_flags the VM recorded/installed for a signal number.
108 static int get_our_sigflags(int);
109 static void set_our_sigflags(int, int);
// One-time initialization of the VM's signal sets and signal handlers.
110 static void signal_sets_init();
111 static void install_signal_handlers();
// Install the VM handler for one signal; the meaning of the bool flag is
// defined in the .cpp — TODO confirm (likely "set vs. restore").
112 static void set_signal_handler(int, bool);
// Presumably true when the current disposition of 'sig' is "ignored" —
// verify in the implementation.
113 static bool is_sig_ignored(int sig);
114
// Signal sets maintained by the VM (populated by signal_sets_init()).
115 static sigset_t* unblocked_signals();
116 static sigset_t* vm_signals();
117
118 // For signal-chaining
// Return the application's previously-installed action for 'sig' (if any),
// and dispatch to it; used to chain to pre-existing handlers.
119 static struct sigaction *get_chained_signal_action(int sig);
120 static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
121
122 // Real-time clock functions
123 static void clock_init(void);
124
125 // Stack repair handling
126
127 // none present
128
129 private:
// Function-pointer typedefs for NUMA entry points (libnuma-style) that are
// resolved dynamically, so the VM still runs when they are unavailable —
// see the NULL checks in the public wrappers below.
130 typedef int (*sched_getcpu_func_t)(void);
131 typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
132 typedef int (*numa_max_node_func_t)(void);
133 typedef int (*numa_available_func_t)(void);
134 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
135 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
136
// Setters used during symbol resolution; the backing static fields are
// declared on lines elided from this view (original 137-147).
148 static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
149 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
150 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
151 static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
152 public:
// Thin wrappers over the resolved NUMA functions; each returns -1 when the
// corresponding symbol was not resolved (function pointer still NULL).
153 static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
154 static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
155 return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
156 }
157 static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
158 static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
159 static int numa_tonode_memory(void *start, size_t size, int node) {
160 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
161 }
// Interleave [start, start+size) across the nodes in _numa_all_nodes.
// Deliberately a silent no-op unless both the function pointer and the
// node mask were successfully resolved.
162 static void numa_interleave_memory(void *start, size_t size) {
163 if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
164 _numa_interleave_memory(start, size, _numa_all_nodes);
165 }
166 }
167 static int get_node_by_cpu(int cpu_id);
168 };
169
170 #endif // OS_BSD_VM_OS_BSD_HPP
|