
src/hotspot/share/runtime/thread.hpp

rev 58073 : [mq]: v3


 667   // If "is_par" is false, sets the token of "this" to
 668   // "claim_token", and returns "true".  If "is_par" is true,
 669   // uses an atomic instruction to set the current thread's token to
 670   // "claim_token", if it is not already.  Returns "true" iff the
 671   // calling thread does the update, which indicates that the calling
 672   // thread has claimed the thread in the current iteration.
 673   bool claim_threads_do(bool is_par, uintx claim_token) {
 674     if (!is_par) {
 675       _threads_do_token = claim_token;
 676       return true;
 677     } else {
 678       return claim_par_threads_do(claim_token);
 679     }
 680   }
 681 
 682   uintx threads_do_token() const { return _threads_do_token; }
 683 
 684   // jvmtiRedefineClasses support
 685   void metadata_handles_do(void f(Metadata*));
 686 
 687   // Used by fast lock support
 688   virtual bool is_lock_owned(address adr) const;
 689 
 690   // Check if address is in the live stack of this thread (not just for locks).
 691   // Warning: can only be called by the current thread on itself.
 692   bool is_in_stack(address adr) const;
 693 
 694   // Check if address is in the stack mapped to this thread. Used mainly in
 695   // error reporting (so has to include guard zone) and frame printing.
 696   bool on_local_stack(address adr) const {
 697     return (_stack_base > adr && adr >= stack_end());
 698   }
 699 
 700   // Sets this thread as the starting thread. Returns false if thread
 701   // creation fails due to lack of memory, too many threads, etc.
 702   bool set_as_starting_thread();
 703 
 704 protected:
 705   // OS data associated with the thread
 706   OSThread* _osthread;  // Platform-specific thread information
 707 
 708   // Thread local resource area for temporary allocation within the VM
 709   ResourceArea* _resource_area;
 710 
 711   DEBUG_ONLY(ResourceMark* _current_resource_mark;)
 712 
 713   // Thread local handle area for allocation of handles within the VM
 714   HandleArea* _handle_area;
 715   GrowableArray<Metadata*>* _metadata_handles;
 716 
 717   // Support for stack overflow handling, get_thread, etc.
1632     assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
1633     return _stack_yellow_zone_size;
1634   }
1635   static void set_stack_yellow_zone_size(size_t s) {
1636     assert(is_aligned(s, os::vm_page_size()),
1637            "We cannot protect if the yellow zone size is not page aligned.");
1638     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
1639     _stack_yellow_zone_size = s;
1640   }
1641 
1642   static size_t stack_reserved_zone_size() {
1643     // _stack_reserved_zone_size may be 0. This indicates the feature is off.
1644     return _stack_reserved_zone_size;
1645   }
1646   static void set_stack_reserved_zone_size(size_t s) {
1647     assert(is_aligned(s, os::vm_page_size()),
1648            "We cannot protect if the reserved zone size is not page aligned.");
1649     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
1650     _stack_reserved_zone_size = s;
1651   }
1652   address stack_reserved_zone_base() {
1653     return (address)(stack_end() +
1654                      (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
1655   }
1656   bool in_stack_reserved_zone(address a) {
1657     return (a <= stack_reserved_zone_base()) &&
1658            (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1659   }
1660 
1661   static size_t stack_yellow_reserved_zone_size() {
1662     return _stack_yellow_zone_size + _stack_reserved_zone_size;
1663   }
1664   bool in_stack_yellow_reserved_zone(address a) {
1665     return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
1666   }
1667 
1668   // Size of red + yellow + reserved zones.
1669   static size_t stack_guard_zone_size() {
1670     return stack_red_zone_size() + stack_yellow_reserved_zone_size();
1671   }
1672 
1715 
1716   // Attempt to reguard the stack after a stack overflow may have occurred.
1717   // Returns true if (a) guard pages are not needed on this thread, (b) the
1718   // pages are already guarded, or (c) the pages were successfully reguarded.
1719   // Returns false if there is not enough stack space to reguard the pages, in
1720   // which case the caller should unwind a frame and try again.  The argument
1721   // should be the caller's (approximate) sp.
1722   bool reguard_stack(address cur_sp);
1723   // Similar to the above, but checks whether the current stack pointer is
1724   // out of the guard area, and reguards if possible.
1725   bool reguard_stack(void);
1726 
1727   address stack_overflow_limit() { return _stack_overflow_limit; }
1728   void set_stack_overflow_limit() {
1729     _stack_overflow_limit =
1730       stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
1731   }
1732 
1733   // Check if address is in the usable part of the stack (excludes protected
1734   // guard pages). Can be applied to any thread and approximates
1735   // is_in_stack when the query has to happen from another thread.
1736   bool is_in_usable_stack(address adr) const;
1737 
1738   // Misc. accessors/mutators
1739   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
1740   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
1741   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
1742 
1743   // For assembly stub generation
1744   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
1745   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
1746   static ByteSize pending_jni_exception_check_fn_offset() {
1747     return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
1748   }
1749   static ByteSize last_Java_sp_offset() {
1750     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
1751   }
1752   static ByteSize last_Java_pc_offset() {
1753     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
1754   }
1755   static ByteSize frame_anchor_offset() {
1756     return byte_offset_of(JavaThread, _anchor);

New version (rev 58073 : [mq]: v3):

 667   // If "is_par" is false, sets the token of "this" to
 668   // "claim_token", and returns "true".  If "is_par" is true,
 669   // uses an atomic instruction to set the current thread's token to
 670   // "claim_token", if it is not already.  Returns "true" iff the
 671   // calling thread does the update, which indicates that the calling
 672   // thread has claimed the thread in the current iteration.
 673   bool claim_threads_do(bool is_par, uintx claim_token) {
 674     if (!is_par) {
 675       _threads_do_token = claim_token;
 676       return true;
 677     } else {
 678       return claim_par_threads_do(claim_token);
 679     }
 680   }
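
For context, a minimal standalone sketch of the parallel claiming protocol, using std::atomic as a stand-in for HotSpot's own Atomic wrappers (all names and types below are illustrative assumptions, not the VM's):

    #include <atomic>
    #include <cstdint>

    struct DemoThread {
      // Stand-in for _threads_do_token; the VM uses its own atomic primitives.
      std::atomic<uintptr_t> _threads_do_token{0};

      // Returns true only for the single caller whose compare-and-swap installs
      // claim_token, i.e. the caller that claims this thread for the iteration.
      bool claim_par_threads_do(uintptr_t claim_token) {
        uintptr_t seen = _threads_do_token.load(std::memory_order_relaxed);
        while (seen != claim_token) {
          if (_threads_do_token.compare_exchange_weak(seen, claim_token)) {
            return true;   // this caller performed the update
          }
          // a failed CAS refreshed 'seen'; re-test and retry
        }
        return false;      // another caller already claimed this iteration
      }
    };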
 681 
 682   uintx threads_do_token() const { return _threads_do_token; }
 683 
 684   // jvmtiRedefineClasses support
 685   void metadata_handles_do(void f(Metadata*));
 686 
 687  private:
 688 
 689   // Check if address is within the given range of this thread's
 690   // stack:  stack_base() > adr >/>= limit
 691   // The check is inclusive of limit if "inclusive" is true, else exclusive.
 692   bool is_in_stack_range(address adr, address limit, bool inclusive) const {
 693     assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
 694     return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
 695   }
 696 
 697  public:
 698   // Used by fast lock support
 699   virtual bool is_lock_owned(address adr) const;
 700 
 701   // Check if address is within the given range of this thread's
 702   // stack:  stack_base() > adr >= limit
 703   bool is_in_stack_range_incl(address adr, address limit) const {
 704     return is_in_stack_range(adr, limit, true);
 705   }
 706 
 707   // Check if address is within the given range of this thread's
 708   // stack:  stack_base() > adr > limit
 709   bool is_in_stack_range_excl(address adr, address limit) const {
 710     return is_in_stack_range(adr, limit, false);
 711   }
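
To make the ordering concrete: the stack grows downward, so stack_base() is the high bound and limit the low bound. A self-contained sketch with made-up addresses (in_range here is a hypothetical mirror of is_in_stack_range above):

    #include <cassert>

    typedef unsigned char* address;

    static bool in_range(address adr, address base, address limit, bool inclusive) {
      return base > adr && (inclusive ? adr >= limit : adr > limit);
    }

    int main() {
      address base  = (address)0x8000;   // high end of the stack
      address limit = (address)0x4000;   // low bound being tested
      assert( in_range((address)0x5000, base, limit, true));  // strictly inside
      assert( in_range(limit, base, limit, true));            // inclusive: limit counts
      assert(!in_range(limit, base, limit, false));           // exclusive: limit rejected
      assert(!in_range(base,  base, limit, true));            // base itself never counts
      return 0;
    }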
 712 
 713   // Check if address is in the stack mapped to this thread. Used mainly in
 714   // error reporting (so has to include guard zone) and frame printing.
 715   bool is_in_full_stack(address adr) const {
 716     return is_in_stack_range_incl(adr, stack_end());
 717   }
 718 
 719   // Check if address is in the live stack of this thread (not just for locks).
 720   // Warning: can only be called by the current thread on itself.
 721   bool is_in_live_stack(address adr) const {
 722     assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
 723     return is_in_stack_range_incl(adr, os::current_stack_pointer());
 724   }
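
The current-thread restriction exists because the lower bound is the caller's own stack pointer. A common portable approximation of that pointer, shown here as an illustrative stand-in for os::current_stack_pointer rather than its actual implementation, is the address of a local variable:

    typedef unsigned char* address;

    // Sketch only: a local variable lives in the caller's current frame, so its
    // address conservatively approximates the current stack pointer.
    static bool addr_is_in_my_live_stack(address adr, address stack_base) {
      unsigned char marker;
      address approx_sp = &marker;
      return stack_base > adr && adr >= approx_sp;
    }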
 725 
 726   // Sets this thread as the starting thread. Returns false if thread
 727   // creation fails due to lack of memory, too many threads, etc.
 728   bool set_as_starting_thread();
 729 
 730 protected:
 731   // OS data associated with the thread
 732   OSThread* _osthread;  // Platform-specific thread information
 733 
 734   // Thread local resource area for temporary allocation within the VM
 735   ResourceArea* _resource_area;
 736 
 737   DEBUG_ONLY(ResourceMark* _current_resource_mark;)
 738 
 739   // Thread local handle area for allocation of handles within the VM
 740   HandleArea* _handle_area;
 741   GrowableArray<Metadata*>* _metadata_handles;
 742 
 743   // Support for stack overflow handling, get_thread, etc.
1658     assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
1659     return _stack_yellow_zone_size;
1660   }
1661   static void set_stack_yellow_zone_size(size_t s) {
1662     assert(is_aligned(s, os::vm_page_size()),
1663            "We cannot protect if the yellow zone size is not page aligned.");
1664     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
1665     _stack_yellow_zone_size = s;
1666   }
1667 
1668   static size_t stack_reserved_zone_size() {
1669     // _stack_reserved_zone_size may be 0. This indicates the feature is off.
1670     return _stack_reserved_zone_size;
1671   }
1672   static void set_stack_reserved_zone_size(size_t s) {
1673     assert(is_aligned(s, os::vm_page_size()),
1674            "We cannot protect if the reserved zone size is not page aligned.");
1675     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
1676     _stack_reserved_zone_size = s;
1677   }
1678   address stack_reserved_zone_base() const {
1679     return (address)(stack_end() +
1680                      (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
1681   }
1682   bool in_stack_reserved_zone(address a) {
1683     return (a <= stack_reserved_zone_base()) &&
1684            (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1685   }
1686 
1687   static size_t stack_yellow_reserved_zone_size() {
1688     return _stack_yellow_zone_size + _stack_reserved_zone_size;
1689   }
1690   bool in_stack_yellow_reserved_zone(address a) {
1691     return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
1692   }
1693 
1694   // Size of red + yellow + reserved zones.
1695   static size_t stack_guard_zone_size() {
1696     return stack_red_zone_size() + stack_yellow_reserved_zone_size();
1697   }
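
Taken together, the zones sit just above stack_end(): red lowest, then yellow, then reserved. A standalone sketch of the arithmetic with illustrative page-aligned sizes (the real sizes are configured via VM options):

    #include <cassert>
    #include <cstddef>

    typedef unsigned char* address;

    int main() {
      const size_t page = 4096;                       // illustrative page size
      const size_t red = page, yellow = 2 * page, reserved = page;

      address stack_end     = (address)0x100000;      // lowest stack address
      address red_base      = stack_end + red;        // top of red zone
      address yellow_base   = red_base + yellow;      // top of yellow zone
      address reserved_base = yellow_base + reserved; // == stack_reserved_zone_base()

      // stack_guard_zone_size() == red + yellow + reserved:
      assert((size_t)(reserved_base - stack_end) == red + yellow + reserved);

      // in_stack_reserved_zone(a): a within [reserved_base - reserved, reserved_base]
      address a = reserved_base - reserved / 2;
      assert(a <= reserved_base && a >= reserved_base - reserved);
      return 0;
    }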
1698 
1741 
1742   // Attempt to reguard the stack after a stack overflow may have occurred.
1743   // Returns true if (a) guard pages are not needed on this thread, (b) the
1744   // pages are already guarded, or (c) the pages were successfully reguarded.
1745   // Returns false if there is not enough stack space to reguard the pages, in
1746   // which case the caller should unwind a frame and try again.  The argument
1747   // should be the caller's (approximate) sp.
1748   bool reguard_stack(address cur_sp);
1749   // Similar to the above, but checks whether the current stack pointer is
1750   // out of the guard area, and reguards if possible.
1751   bool reguard_stack(void);
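
On POSIX systems a guard zone is typically armed by protecting its pages; a hypothetical sketch of that step (the VM's actual reguard path additionally checks that the current sp is safely above the zone before touching protections):

    #include <sys/mman.h>
    #include <cstddef>

    // Hypothetical helper: re-arm a guard zone by removing all access to its
    // pages, so the next touch faults and reports stack overflow again.
    static bool reguard_zone(void* zone_low, size_t zone_size) {
      return mprotect(zone_low, zone_size, PROT_NONE) == 0;
    }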
1752 
1753   address stack_overflow_limit() { return _stack_overflow_limit; }
1754   void set_stack_overflow_limit() {
1755     _stack_overflow_limit =
1756       stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
1757   }
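
The limit set here is the low-water mark that stack-banging code compares sp against; taking the larger of the guard and shadow areas means a single comparison covers both. A restatement with std::max standing in for HotSpot's MAX2:

    #include <algorithm>
    #include <cstddef>

    typedef unsigned char* address;

    static address overflow_limit(address stack_end, size_t guard_zone, size_t shadow_zone) {
      // sp below this limit on method entry signals a possible overflow.
      return stack_end + std::max(guard_zone, shadow_zone);
    }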
1758 
1759   // Check if address is in the usable part of the stack (excludes protected
1760   // guard pages). Can be applied to any thread and approximates
1761   // is_in_live_stack when the query has to happen from another thread.
1762   bool is_in_usable_stack(address adr) const {
1763     return is_in_stack_range_incl(adr, stack_reserved_zone_base());
1764   }
1765 
1766   // Misc. accessors/mutators
1767   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
1768   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
1769   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
1770 
1771   // For assembly stub generation
1772   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
1773   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
1774   static ByteSize pending_jni_exception_check_fn_offset() {
1775     return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
1776   }
1777   static ByteSize last_Java_sp_offset() {
1778     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
1779   }
1780   static ByteSize last_Java_pc_offset() {
1781     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
1782   }
1783   static ByteSize frame_anchor_offset() {
1784     return byte_offset_of(JavaThread, _anchor);