src/share/vm/runtime/thread.cpp

*** 302,311 ****
--- 302,312 ----
  void Thread::record_stack_base_and_size() {
    set_stack_base(os::current_stack_base());
    set_stack_size(os::current_stack_size());
    if (is_Java_thread()) {
      ((JavaThread*) this)->set_stack_overflow_limit();
+     ((JavaThread*) this)->set_reserved_stack_activation((intptr_t*)stack_base());
    }
    // CR 7190089: on Solaris, primordial thread's stack is adjusted
    // in initialize_thread(). Without the adjustment, stack size is
    // incorrect if stack is set to unlimited (ulimit -s unlimited).
    // So far, only Solaris has real implementation of initialize_thread().
*** 906,916 ****
    return false;
  }

  bool Thread::is_in_usable_stack(address adr) const {
!   size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
    size_t usable_stack_size = _stack_size - stack_guard_size;

    return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
  }
--- 907,917 ----
    return false;
  }

  bool Thread::is_in_usable_stack(address adr) const {
!   size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
    size_t usable_stack_size = _stack_size - stack_guard_size;

    return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
  }
*** 1462,1471 ****
--- 1463,1473 ----
        memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
    } else {
      _jvmci_counters = NULL;
    }
  #endif // INCLUDE_JVMCI
+   _reserved_stack_activation = NULL; // stack base not known yet
    (void)const_cast<oop&>(_exception_oop = oop(NULL));
    _exception_pc = 0;
    _exception_handler_pc = 0;
    _is_method_handle_return = 0;
    _jvmti_thread_state= NULL;
*** 1534,1544 ****
    }
    assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
  }

  bool JavaThread::reguard_stack(address cur_sp) {
!   if (_stack_guard_state != stack_guard_yellow_disabled) {
      return true; // Stack already guarded or guard pages not needed.
    }

    if (register_stack_overflow()) {
      // For those architectures which have separate register and
--- 1536,1547 ----
    }
    assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
  }

  bool JavaThread::reguard_stack(address cur_sp) {
!   if (_stack_guard_state != stack_guard_yellow_disabled
!       && _stack_guard_state != stack_guard_reserved_disabled) {
      return true; // Stack already guarded or guard pages not needed.
    }

    if (register_stack_overflow()) {
      // For those architectures which have separate register and
*** 1551,1562 ****
    // there to provoke an exception during stack banging. If java code
    // is executing there, either StackShadowPages should be larger, or
    // some exception code in c1, c2 or the interpreter isn't unwinding
    // when it should.
    guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
!   enable_stack_yellow_zone();
    return true;
  }

  bool JavaThread::reguard_stack(void) {
    return reguard_stack(os::current_stack_pointer());
--- 1554,1572 ----
    // there to provoke an exception during stack banging. If java code
    // is executing there, either StackShadowPages should be larger, or
    // some exception code in c1, c2 or the interpreter isn't unwinding
    // when it should.
    guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
!   if (_stack_guard_state == stack_guard_yellow_disabled) {
      enable_stack_yellow_zone();
+     if (reserved_stack_activation() != (intptr_t*)stack_base()) {
+       set_reserved_stack_activation((intptr_t*)stack_base());
+     }
+   } else if (_stack_guard_state == stack_guard_reserved_disabled) {
+     set_reserved_stack_activation((intptr_t*)stack_base());
+     enable_stack_reserved_zone();
+   }
    return true;
  }

  bool JavaThread::reguard_stack(void) {
    return reguard_stack(os::current_stack_pointer());
*** 2482,2492 ****
  }

  void JavaThread::create_stack_guard_pages() {
    if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
    address low_addr = stack_base() - stack_size();

!   size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
    int allocate = os::allocate_stack_guard_pages();
    // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

    if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
--- 2492,2502 ----
  }

  void JavaThread::create_stack_guard_pages() {
    if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
    address low_addr = stack_base() - stack_size();

!   size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
    int allocate = os::allocate_stack_guard_pages();
    // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

    if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
*** 2506,2516 ****

  void JavaThread::remove_stack_guard_pages() {
    assert(Thread::current() == this, "from different thread");
    if (_stack_guard_state == stack_guard_unused) return;
    address low_addr = stack_base() - stack_size();
!   size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
    if (os::allocate_stack_guard_pages()) {
      if (os::remove_stack_guard_pages((char *) low_addr, len)) {
        _stack_guard_state = stack_guard_unused;
      } else {
--- 2516,2526 ----

  void JavaThread::remove_stack_guard_pages() {
    assert(Thread::current() == this, "from different thread");
    if (_stack_guard_state == stack_guard_unused) return;
    address low_addr = stack_base() - stack_size();
!   size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
    if (os::allocate_stack_guard_pages()) {
      if (os::remove_stack_guard_pages((char *) low_addr, len)) {
        _stack_guard_state = stack_guard_unused;
      } else {
*** 2524,2533 ****
--- 2534,2581 ----
        warning("Attempt to unprotect stack guard pages failed.");
      }
    }
  }

+ void JavaThread::enable_stack_reserved_zone() {
+   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+   assert(_stack_guard_state != stack_guard_enabled, "already enabled");
+
+   // The base notation is from the stacks point of view, growing downward.
+   // We need to adjust it to work correctly with guard_memory()
+   address base = stack_reserved_zone_base() - stack_reserved_zone_size();
+
+   guarantee(base < stack_base(),"Error calculating stack reserved zone");
+   guarantee(base < os::current_stack_pointer(),"Error calculating stack reserved zone");
+
+   if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
+     _stack_guard_state = stack_guard_enabled;
+   } else {
+     warning("Attempt to guard stack reserved zone failed.");
+   }
+   enable_register_stack_guard();
+ }
+
+ void JavaThread::disable_stack_reserved_zone() {
+   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+   assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");
+
+   // Simply return if called for a thread that does not use guard pages.
+   if (_stack_guard_state == stack_guard_unused) return;
+
+   // The base notation is from the stacks point of view, growing downward.
+   // We need to adjust it to work correctly with guard_memory()
+   address base = stack_reserved_zone_base() - stack_reserved_zone_size();
+
+   if (os::unguard_memory((char *)base, stack_reserved_zone_size())) {
+     _stack_guard_state = stack_guard_reserved_disabled;
+   } else {
+     warning("Attempt to unguard stack reserved zone failed.");
+   }
+   disable_register_stack_guard();
+ }
+
  void JavaThread::enable_stack_yellow_zone() {
    assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
    assert(_stack_guard_state != stack_guard_enabled, "already enabled");

    // The base notation is from the stacks point of view, growing downward.
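
For context, a minimal standalone sketch (not part of this webrev) of the size arithmetic the hunks above keep repeating: the reserved zone is added on top of the yellow and red zones, so every guard-region length derived from the page counts grows by StackReservedPages * os::vm_page_size(), and the usable stack shrinks by the same amount. The page counts, page size, and stack size below are hypothetical illustration values, not HotSpot defaults.

    // Standalone sketch: guard-region sizing before vs. after adding the
    // reserved zone. All constants below are hypothetical stand-ins for
    // StackReservedPages / StackYellowPages / StackRedPages, os::vm_page_size()
    // and the thread stack size.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page_size            = 4096;       // assumed page size
      const size_t stack_reserved_pages = 1;           // hypothetical StackReservedPages
      const size_t stack_yellow_pages   = 2;           // hypothetical StackYellowPages
      const size_t stack_red_pages      = 1;           // hypothetical StackRedPages
      const size_t stack_size           = 512 * 1024;  // hypothetical thread stack size

      // Before the change: only yellow + red pages are guarded.
      size_t old_guard = (stack_yellow_pages + stack_red_pages) * page_size;
      // After the change: the reserved zone is included in every length
      // computation (create/remove_stack_guard_pages, is_in_usable_stack).
      size_t new_guard = (stack_reserved_pages + stack_yellow_pages + stack_red_pages) * page_size;

      printf("guard region: %zu -> %zu bytes\n", old_guard, new_guard);
      printf("usable stack: %zu -> %zu bytes\n",
             stack_size - old_guard, stack_size - new_guard);
      return 0;
    }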