 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25 #ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
26 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
27
28 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
29
30 #include "runtime/atomic.hpp"
31 #include "runtime/os.inline.hpp"
32 #include "runtime/thread.hpp"
33
34 #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
35
36 inline void Thread::set_suspend_flag(SuspendFlags f) {
37 assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
38 uint32_t flags;
39 do {
40 flags = _suspend_flags;
41 }
42 while (Atomic::cmpxchg((jint)(flags | f),
43 (volatile jint*)&_suspend_flags,
44 (jint)flags) != (jint)flags);
45 }
46 inline void Thread::clear_suspend_flag(SuspendFlags f) {
47 assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
48 uint32_t flags;
49 do {
50 flags = _suspend_flags;
51 }
52 while (Atomic::cmpxchg((jint)(flags & ~f),
53 (volatile jint*)&_suspend_flags,
54 (jint)flags) != (jint)flags);
55 }
72 inline void Thread::clear_trace_flag() {
73 clear_suspend_flag(_trace_flag);
74 }
75
76 inline jlong Thread::cooked_allocated_bytes() {
77 jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
78 if (UseTLAB) {
79 size_t used_bytes = tlab().used_bytes();
80 if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
81 // Comparing used_bytes with the maximum allowed size will ensure
82 // that we don't add the used bytes from a semi-initialized TLAB
83 // ending up with incorrect values. There is still a race between
84 // incrementing _allocated_bytes and clearing the TLAB, that might
85 // cause double counting in rare cases.
86 return allocated_bytes + used_bytes;
87 }
88 }
89 return allocated_bytes;
90 }
91
92 inline void JavaThread::set_ext_suspended() {
93 set_suspend_flag (_ext_suspended);
94 }
95 inline void JavaThread::clear_ext_suspended() {
96 clear_suspend_flag(_ext_suspended);
97 }
98
99 inline void JavaThread::set_external_suspend() {
100 set_suspend_flag(_external_suspend);
101 }
102 inline void JavaThread::clear_external_suspend() {
103 clear_suspend_flag(_external_suspend);
104 }
105
106 inline void JavaThread::set_deopt_suspend() {
107 set_suspend_flag(_deopt_suspend);
108 }
109 inline void JavaThread::clear_deopt_suspend() {
110 clear_suspend_flag(_deopt_suspend);
111 }
158 #ifdef ASSERT
159 if (os::uses_stack_guard_pages()) {
160 assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
161 }
162 #endif
163 return _stack_guard_state == stack_guard_enabled;
164 }
165
166 // The release make sure this store is done after storing the handshake
167 // operation or global state
168 inline void JavaThread::set_polling_page(void* poll_value) {
169 OrderAccess::release_store(polling_page_addr(), poll_value);
170 }
171
172 // The aqcquire make sure reading of polling page is done before
173 // the reading the handshake operation or the global state
174 inline volatile void* JavaThread::get_polling_page() {
175 return OrderAccess::load_acquire(polling_page_addr());
176 }
177
178 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
|
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25 #ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
26 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
27
28 #include "runtime/atomic.hpp"
29 #include "runtime/os.inline.hpp"
30 #include "runtime/thread.hpp"
31 #include "runtime/threadSMR.hpp"
32
33 inline void Thread::set_suspend_flag(SuspendFlags f) {
34 assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
35 uint32_t flags;
36 do {
37 flags = _suspend_flags;
38 }
39 while (Atomic::cmpxchg((jint)(flags | f),
40 (volatile jint*)&_suspend_flags,
41 (jint)flags) != (jint)flags);
42 }
43 inline void Thread::clear_suspend_flag(SuspendFlags f) {
44 assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
45 uint32_t flags;
46 do {
47 flags = _suspend_flags;
48 }
49 while (Atomic::cmpxchg((jint)(flags & ~f),
50 (volatile jint*)&_suspend_flags,
51 (jint)flags) != (jint)flags);
52 }
69 inline void Thread::clear_trace_flag() {
70 clear_suspend_flag(_trace_flag);
71 }
72
73 inline jlong Thread::cooked_allocated_bytes() {
74 jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
75 if (UseTLAB) {
76 size_t used_bytes = tlab().used_bytes();
77 if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
78 // Comparing used_bytes with the maximum allowed size will ensure
79 // that we don't add the used bytes from a semi-initialized TLAB
80 // ending up with incorrect values. There is still a race between
81 // incrementing _allocated_bytes and clearing the TLAB, that might
82 // cause double counting in rare cases.
83 return allocated_bytes + used_bytes;
84 }
85 }
86 return allocated_bytes;
87 }
88
89 inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
90 return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
91 }
92
93 inline ThreadsList* Thread::get_threads_hazard_ptr() {
94 return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
95 }
96
97 inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
98 OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
99 }
100
101 inline void JavaThread::set_ext_suspended() {
102 set_suspend_flag (_ext_suspended);
103 }
104 inline void JavaThread::clear_ext_suspended() {
105 clear_suspend_flag(_ext_suspended);
106 }
107
108 inline void JavaThread::set_external_suspend() {
109 set_suspend_flag(_external_suspend);
110 }
111 inline void JavaThread::clear_external_suspend() {
112 clear_suspend_flag(_external_suspend);
113 }
114
115 inline void JavaThread::set_deopt_suspend() {
116 set_suspend_flag(_deopt_suspend);
117 }
118 inline void JavaThread::clear_deopt_suspend() {
119 clear_suspend_flag(_deopt_suspend);
120 }
167 #ifdef ASSERT
168 if (os::uses_stack_guard_pages()) {
169 assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
170 }
171 #endif
172 return _stack_guard_state == stack_guard_enabled;
173 }
174
175 // The release make sure this store is done after storing the handshake
176 // operation or global state
177 inline void JavaThread::set_polling_page(void* poll_value) {
178 OrderAccess::release_store(polling_page_addr(), poll_value);
179 }
180
181 // The aqcquire make sure reading of polling page is done before
182 // the reading the handshake operation or the global state
183 inline volatile void* JavaThread::get_polling_page() {
184 return OrderAccess::load_acquire(polling_page_addr());
185 }
186
187 inline bool JavaThread::is_exiting() const {
188 // Use load-acquire so that setting of _terminated by
189 // JavaThread::exit() is seen more quickly.
190 TerminatedTypes l_terminated = (TerminatedTypes)
191 OrderAccess::load_acquire((volatile jint *) &_terminated);
192 return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
193 }
194
195 inline bool JavaThread::is_terminated() {
196 // Use load-acquire so that setting of _terminated by
197 // JavaThread::exit() is seen more quickly.
198 TerminatedTypes l_terminated = (TerminatedTypes)
199 OrderAccess::load_acquire((volatile jint *) &_terminated);
200 return check_is_terminated(_terminated);
201 }
202
203 inline void JavaThread::set_terminated(TerminatedTypes t) {
204 // use release-store so the setting of _terminated is seen more quickly
205 OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
206 }
207
208 // special for Threads::remove() which is static:
209 inline void JavaThread::set_terminated_value() {
210 // use release-store so the setting of _terminated is seen more quickly
211 OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
212 }
213
214 template <class T>
215 inline void Threads::threads_do_smr(T *tc, Thread *self) {
216 ThreadsListHandle handle(self);
217 handle.threads_do(tc);
218 }
219
220 inline ThreadsList* Threads::get_smr_java_thread_list() {
221 return (ThreadsList*)OrderAccess::load_acquire(&_smr_java_thread_list);
222 }
223
224 inline ThreadsList* Threads::xchg_smr_java_thread_list(ThreadsList* new_list) {
225 return (ThreadsList*)Atomic::xchg(new_list, &_smr_java_thread_list);
226 }
227
228 inline void Threads::inc_smr_deleted_thread_cnt() {
229 Atomic::inc(&_smr_deleted_thread_cnt);
230 }
231
232 inline void Threads::update_smr_deleted_thread_time_max(jint new_value) {
233 while (true) {
234 jint cur_value = _smr_deleted_thread_time_max;
235 if (new_value <= cur_value) {
236 // No need to update max value so we're done.
237 break;
238 }
239 if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
240 // Updated max value so we're done. Otherwise try it all again.
241 break;
242 }
243 }
244 }
245
246 inline void Threads::add_smr_deleted_thread_times(jint add_value) {
247 Atomic::add(add_value, &_smr_deleted_thread_times);
248 }
249
250 inline void Threads::inc_smr_tlh_cnt() {
251 Atomic::inc(&_smr_tlh_cnt);
252 }
253
254 inline void Threads::update_smr_tlh_time_max(jint new_value) {
255 while (true) {
256 jint cur_value = _smr_tlh_time_max;
257 if (new_value <= cur_value) {
258 // No need to update max value so we're done.
259 break;
260 }
261 if (Atomic::cmpxchg(new_value, &_smr_tlh_time_max, cur_value) == cur_value) {
262 // Updated max value so we're done. Otherwise try it all again.
263 break;
264 }
265 }
266 }
267
268 inline void Threads::add_smr_tlh_times(jint add_value) {
269 Atomic::add(add_value, &_smr_tlh_times);
270 }
271
272 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
|