146 inline size_t JavaThread::stack_available(address cur_sp) {
147 // This code assumes java stacks grow down
148 address low_addr; // Limit on the address for deepest stack depth
149 if (_stack_guard_state == stack_guard_unused) {
150 low_addr = stack_end();
151 } else {
152 low_addr = stack_reserved_zone_base();
153 }
154 return cur_sp > low_addr ? cur_sp - low_addr : 0;
155 }
156
157 inline bool JavaThread::stack_guards_enabled() {
158 #ifdef ASSERT
159 if (os::uses_stack_guard_pages()) {
160 assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
161 }
162 #endif
163 return _stack_guard_state == stack_guard_enabled;
164 }
165
166 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
|
146 inline size_t JavaThread::stack_available(address cur_sp) {
147 // This code assumes java stacks grow down
148 address low_addr; // Limit on the address for deepest stack depth
149 if (_stack_guard_state == stack_guard_unused) {
150 low_addr = stack_end();
151 } else {
152 low_addr = stack_reserved_zone_base();
153 }
154 return cur_sp > low_addr ? cur_sp - low_addr : 0;
155 }
156
157 inline bool JavaThread::stack_guards_enabled() {
158 #ifdef ASSERT
159 if (os::uses_stack_guard_pages()) {
160 assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
161 }
162 #endif
163 return _stack_guard_state == stack_guard_enabled;
164 }
165
// Publish a new polling-page pointer for this thread.
// The release makes sure this store is done after storing the handshake
// operation or global state, so a thread that observes the new polling
// page value is guaranteed to also observe those earlier stores.
inline void JavaThread::set_polling_page(void* poll_value) {
  OrderAccess::release_store_ptr(polling_page_addr(), poll_value);
}
171
// Read this thread's current polling-page pointer.
// The acquire makes sure the read of the polling page is done before
// reading the handshake operation or the global state, pairing with the
// release in set_polling_page().
inline void* JavaThread::get_polling_page() {
  return OrderAccess::load_ptr_acquire(polling_page_addr());
}
177
178 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
|