/*
 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP

#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE

#include "runtime/atomic.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"

#undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE

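// Set or clear individual bits in _suspend_flags. The update is done with a
// CAS retry loop so that concurrent updates from other threads are not lost.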
inline void Thread::set_suspend_flag(SuspendFlags f) {
  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((jint)(flags | f),
                         (volatile jint*)&_suspend_flags,
                         (jint)flags) != (jint)flags);
}
inline void Thread::clear_suspend_flag(SuspendFlags f) {
  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((jint)(flags & ~f),
                         (volatile jint*)&_suspend_flags,
                         (jint)flags) != (jint)flags);
}

inline void Thread::set_has_async_exception() {
  set_suspend_flag(_has_async_exception);
}
inline void Thread::clear_has_async_exception() {
  clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
  set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
  clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
  set_suspend_flag(_trace_flag);
}
inline void Thread::clear_trace_flag() {
  clear_suspend_flag(_trace_flag);
}

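// Return the number of heap bytes allocated by this thread: the recorded
// _allocated_bytes plus whatever is currently used in the thread's TLAB
// (when TLABs are enabled).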
inline jlong Thread::cooked_allocated_bytes() {
  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
  if (UseTLAB) {
    size_t used_bytes = tlab().used_bytes();
    if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
      // Comparing used_bytes with the maximum allowed size ensures that we
      // don't add the used bytes from a semi-initialized TLAB and end up
      // with incorrect values. There is still a race between incrementing
      // _allocated_bytes and clearing the TLAB that might cause double
      // counting in rare cases.
      return allocated_bytes + used_bytes;
    }
  }
  return allocated_bytes;
}

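// Helpers for the JavaThread-specific suspend flag bits (external suspension
// and deopt suspension), built on Thread::set_suspend_flag()/clear_suspend_flag().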
inline void JavaThread::set_ext_suspended() {
  set_suspend_flag (_ext_suspended);
}
inline void JavaThread::clear_ext_suspended() {
  clear_suspend_flag(_ext_suspended);
}

inline void JavaThread::set_external_suspend() {
  set_suspend_flag(_external_suspend);
}
inline void JavaThread::clear_external_suspend() {
  clear_suspend_flag(_external_suspend);
}

inline void JavaThread::set_deopt_suspend() {
  set_suspend_flag(_deopt_suspend);
}
inline void JavaThread::clear_deopt_suspend() {
  clear_suspend_flag(_deopt_suspend);
}

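// Record an asynchronous exception for this thread; the thread notices it via
// the _special_runtime_exit_condition and suspend flag checks at its next
// state transition.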
inline void JavaThread::set_pending_async_exception(oop e) {
  _pending_async_exception = e;
  _special_runtime_exit_condition = _async_exception;
  set_has_async_exception();
}

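// On weakly-ordered platforms (PPC64, AArch64) the thread state is accessed
// with acquire/release semantics so that the state change is ordered with the
// surrounding memory accesses.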
#if defined(PPC64) || defined (AARCH64)
inline JavaThreadState JavaThread::thread_state() const    {
  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
}

inline void JavaThread::set_thread_state(JavaThreadState s) {
  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
}
#endif

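// Mark the JNI attach as complete. The fence makes the updated attach state
// visible to other threads before this thread continues.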
inline void JavaThread::set_done_attaching_via_jni() {
  _jni_attach_state = _attached_via_jni;
  OrderAccess::fence();
}

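// Queries for the current stack guard zone state.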
inline bool JavaThread::stack_guard_zone_unused() {
  return _stack_guard_state == stack_guard_unused;
}

inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_yellow_reserved_disabled;
}

inline bool JavaThread::stack_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_reserved_disabled;
}

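// Return the number of usable stack bytes below cur_sp: down to stack_end()
// when the guard pages are unused, otherwise down to the base of the
// reserved zone.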
inline size_t JavaThread::stack_available(address cur_sp) {
  // This code assumes Java stacks grow down
  address low_addr; // Limit on the address for deepest stack depth
  if (_stack_guard_state == stack_guard_unused) {
    low_addr = stack_end();
  } else {
    low_addr = stack_reserved_zone_base();
  }
  return cur_sp > low_addr ? cur_sp - low_addr : 0;
}

inline bool JavaThread::stack_guards_enabled() {
#ifdef ASSERT
  if (os::uses_stack_guard_pages()) {
    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
  }
#endif
  return _stack_guard_state == stack_guard_enabled;
}

// The release makes sure this store is done after storing the handshake
// operation or global state.
inline void JavaThread::set_polling_page(void* poll_value) {
  OrderAccess::release_store(polling_page_addr(), poll_value);
}

// The acquire makes sure reading of the polling page is done before reading
// the handshake operation or the global state.
inline volatile void* JavaThread::get_polling_page() {
  return OrderAccess::load_acquire(polling_page_addr());
}

#endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP