/*
 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"

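// Bits in _suspend_flags are set and cleared with a cmpxchg retry loop so
// that concurrent updates from other threads are never lost.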
inline void Thread::set_suspend_flag(SuspendFlags f) {
  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((jint)(flags | f),
                         (volatile jint*)&_suspend_flags,
                         (jint)flags) != (jint)flags);
}
inline void Thread::clear_suspend_flag(SuspendFlags f) {
  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((jint)(flags & ~f),
                         (volatile jint*)&_suspend_flags,
                         (jint)flags) != (jint)flags);
}

inline void Thread::set_has_async_exception() {
  set_suspend_flag(_has_async_exception);
}
inline void Thread::clear_has_async_exception() {
  clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
  set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
  clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
  set_suspend_flag(_trace_flag);
}
inline void Thread::clear_trace_flag() {
  clear_suspend_flag(_trace_flag);
}

inline jlong Thread::cooked_allocated_bytes() {
  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
  if (UseTLAB) {
    size_t used_bytes = tlab().used_bytes();
    if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
      // Comparing used_bytes with the maximum allowed size ensures that we
      // don't add the used bytes from a semi-initialized TLAB and end up
      // with incorrect values. There is still a race between incrementing
      // _allocated_bytes and clearing the TLAB, which might cause double
      // counting in rare cases.
      return allocated_bytes + used_bytes;
    }
  }
  return allocated_bytes;
}

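// Thread-SMR support: atomic accessors for this thread's ThreadsList
// hazard pointer.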
inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
  return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
}

inline ThreadsList* Thread::get_threads_hazard_ptr() {
  return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
}

inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
  OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
}

inline void JavaThread::set_ext_suspended() {
  set_suspend_flag(_ext_suspended);
}
inline void JavaThread::clear_ext_suspended() {
  clear_suspend_flag(_ext_suspended);
}

inline void JavaThread::set_external_suspend() {
  set_suspend_flag(_external_suspend);
}
inline void JavaThread::clear_external_suspend() {
  clear_suspend_flag(_external_suspend);
}

inline void JavaThread::set_deopt_suspend() {
  set_suspend_flag(_deopt_suspend);
}
inline void JavaThread::clear_deopt_suspend() {
  clear_suspend_flag(_deopt_suspend);
}

inline void JavaThread::set_pending_async_exception(oop e) {
  _pending_async_exception = e;
  _special_runtime_exit_condition = _async_exception;
  set_has_async_exception();
}

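// PPC64 and AArch64 have weaker memory ordering than x86, so reads and
// writes of _thread_state use acquire/release semantics on these platforms
// to order the state transition with the memory accesses it guards.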
#if defined(PPC64) || defined(AARCH64)
inline JavaThreadState JavaThread::thread_state() const {
  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
}

inline void JavaThread::set_thread_state(JavaThreadState s) {
  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
}
#endif

inline void JavaThread::set_done_attaching_via_jni() {
  _jni_attach_state = _attached_via_jni;
  OrderAccess::fence();
}

inline bool JavaThread::stack_guard_zone_unused() {
  return _stack_guard_state == stack_guard_unused;
}

inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_yellow_reserved_disabled;
}

inline bool JavaThread::stack_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_reserved_disabled;
}

inline size_t JavaThread::stack_available(address cur_sp) {
  // This code assumes Java stacks grow down.
  address low_addr; // Limit on the address for deepest stack depth
  if (_stack_guard_state == stack_guard_unused) {
    low_addr = stack_end();
  } else {
    low_addr = stack_reserved_zone_base();
  }
  return cur_sp > low_addr ? cur_sp - low_addr : 0;
}

inline bool JavaThread::stack_guards_enabled() {
#ifdef ASSERT
  if (os::uses_stack_guard_pages()) {
    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
  }
#endif
  return _stack_guard_state == stack_guard_enabled;
}

// The release makes sure this store is done after storing the handshake
// operation or global state.
inline void JavaThread::set_polling_page(void* poll_value) {
  OrderAccess::release_store(polling_page_addr(), poll_value);
}

// The acquire makes sure reading of the polling page is done before
// reading the handshake operation or the global state.
inline volatile void* JavaThread::get_polling_page() {
  return OrderAccess::load_acquire(polling_page_addr());
}

inline bool JavaThread::is_exiting() const {
  // Use load-acquire so that setting of _terminated by
  // JavaThread::exit() is seen more quickly.
  TerminatedTypes l_terminated = (TerminatedTypes)
      OrderAccess::load_acquire((volatile jint *) &_terminated);
  return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
}

inline bool JavaThread::is_terminated() {
  // Use load-acquire so that setting of _terminated by
  // JavaThread::exit() is seen more quickly.
  TerminatedTypes l_terminated = (TerminatedTypes)
      OrderAccess::load_acquire((volatile jint *) &_terminated);
  return check_is_terminated(l_terminated);
}

inline void JavaThread::set_terminated(TerminatedTypes t) {
  // use release-store so the setting of _terminated is seen more quickly
  OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
}

// special for Threads::remove() which is static:
inline void JavaThread::set_terminated_value() {
  // use release-store so the setting of _terminated is seen more quickly
  OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
}

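// Iterate over the current list of JavaThreads via a ThreadsListHandle,
// which keeps the underlying ThreadsList from being freed (Thread-SMR)
// for the duration of the walk.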
template <class T>
inline void Threads::threads_do_smr(T *tc, Thread *self) {
  ThreadsListHandle handle(self);
  handle.threads_do(tc);
}

inline ThreadsList* Threads::get_smr_java_thread_list() {
  return (ThreadsList*)OrderAccess::load_acquire(&_smr_java_thread_list);
}

inline ThreadsList* Threads::xchg_smr_java_thread_list(ThreadsList* new_list) {
  return (ThreadsList*)Atomic::xchg(new_list, &_smr_java_thread_list);
}

inline void Threads::inc_smr_deleted_thread_cnt() {
  Atomic::inc(&_smr_deleted_thread_cnt);
}

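// Lock-free update of a running maximum: retry with cmpxchg until the
// stored value is already at least new_value or the store succeeds.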
inline void Threads::update_smr_deleted_thread_time_max(jint new_value) {
  while (true) {
    jint cur_value = _smr_deleted_thread_time_max;
    if (new_value <= cur_value) {
      // No need to update max value so we're done.
      break;
    }
    if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
      // Updated max value so we're done. Otherwise try it all again.
      break;
    }
  }
}

inline void Threads::add_smr_deleted_thread_times(jint add_value) {
  Atomic::add(add_value, &_smr_deleted_thread_times);
}

inline void Threads::inc_smr_tlh_cnt() {
  Atomic::inc(&_smr_tlh_cnt);
}

inline void Threads::update_smr_tlh_time_max(jint new_value) {
  while (true) {
    jint cur_value = _smr_tlh_time_max;
    if (new_value <= cur_value) {
      // No need to update max value so we're done.
      break;
    }
    if (Atomic::cmpxchg(new_value, &_smr_tlh_time_max, cur_value) == cur_value) {
      // Updated max value so we're done. Otherwise try it all again.
      break;
    }
  }
}

inline void Threads::add_smr_tlh_times(jint add_value) {
  Atomic::add(add_value, &_smr_tlh_times);
}

#endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP