src/hotspot/share/runtime/thread.hpp
rev 56044 : imported patch 8230184.patch
rev 56046 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.
rev 56048 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can be experimented with independently of DEFAULT_CACHE_LINE_SIZE; for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE, we are experimenting with 64.
            Move the _previous_owner_tid and _allocation_state fields to share the cache line with ObjectMonitor::_header; put ObjectMonitor::_ref_count on its own cache line after _owner.
            Add an 'int* count_p' parameter to deflate_monitor_list() and deflate_monitor_list_using_JT() and push the counter updates down to where the ObjectMonitors are actually removed from the in-use lists.
            The monitors_iterate() async deflation check should use a negative ref_count.
            Add a 'JavaThread* target' param to deflate_per_thread_idle_monitors_using_JT() and add deflate_common_idle_monitors_using_JT() to make it clear which JavaThread* is the target of the work and which is the calling JavaThread* (self).
            g_free_list, g_om_in_use_list and g_om_in_use_count are now static to synchronizer.cpp (reduce scope).
            Add more diagnostic info to some assert()s; minor code cleanups and code motion.
            save_om_ptr() should detect a race with a deflating thread that is bailing out and cause a retry when the ref_count field is not positive.
            Merge with jdk-14+11; add special GC support for TestHumongousClassLoader.java; merge with 8230184.patch.
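
The rev 56048 note describes experimenting with a separate OM_CACHE_LINE_SIZE constant and regrouping ObjectMonitor fields by cache line. As a rough sketch only (the field set, types, and sizes below are assumptions for illustration, not the actual ObjectMonitor layout), padding keyed to such a constant might look like:

    // Illustrative sketch only -- assumed layout, not the real ObjectMonitor.
    #include <cstddef>

    #ifndef OM_CACHE_LINE_SIZE
    #define OM_CACHE_LINE_SIZE 64   // experimental 64 vs. a DEFAULT_CACHE_LINE_SIZE of 128 on some configs
    #endif

    struct ObjectMonitorSketch {
      void* _header;                                    // first hot field group
      char  _pad0[OM_CACHE_LINE_SIZE - sizeof(void*)];  // pad so _owner starts a new line
      void* _owner;                                     // second hot field group
      char  _pad1[OM_CACHE_LINE_SIZE - sizeof(void*)];  // keep _ref_count off _owner's line
      int   _ref_count;                                 // on its own cache line after _owner
    };

    static_assert(offsetof(ObjectMonitorSketch, _owner) % OM_CACHE_LINE_SIZE == 0,
                  "_owner should start on an OM_CACHE_LINE_SIZE boundary");

The 'int* count_p' parameter mentioned for deflate_monitor_list() and deflate_monitor_list_using_JT() follows a related locality idea: the counter update is performed at the point where an ObjectMonitor is actually unlinked from an in-use list.
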


 506   static inline Thread* current_or_null();
 507   // Returns the current thread, or NULL if not attached, and is
 508   // safe for use from signal-handlers
 509   static inline Thread* current_or_null_safe();
 510 
 511   // Common thread operations
 512 #ifdef ASSERT
 513   static void check_for_dangling_thread_pointer(Thread *thread);
 514 #endif
 515   static void set_priority(Thread* thread, ThreadPriority priority);
 516   static ThreadPriority get_priority(const Thread* const thread);
 517   static void start(Thread* thread);
 518   static void interrupt(Thread* thr);
 519   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 520 
 521   void set_native_thread_name(const char *name) {
 522     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 523     os::set_native_thread_name(name);
 524   }
 525 
 526   ObjectMonitor** om_in_use_list_addr()          { return (ObjectMonitor **)&om_in_use_list; }
 527   Monitor* SR_lock() const                       { return _SR_lock; }
 528 
 529   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 530 
 531   inline void set_suspend_flag(SuspendFlags f);
 532   inline void clear_suspend_flag(SuspendFlags f);
 533 
 534   inline void set_has_async_exception();
 535   inline void clear_has_async_exception();
 536 
 537   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 538 
 539   inline void set_critical_native_unlock();
 540   inline void clear_critical_native_unlock();
 541 
 542   inline void set_trace_flag();
 543   inline void clear_trace_flag();
 544 
 545   // Support for Unhandled Oop detection
 546   // Add the field for both, fastdebug and debug, builds to keep




 506   static inline Thread* current_or_null();
 507   // Returns the current thread, or NULL if not attached, and is
 508   // safe for use from signal-handlers
 509   static inline Thread* current_or_null_safe();
 510 
 511   // Common thread operations
 512 #ifdef ASSERT
 513   static void check_for_dangling_thread_pointer(Thread *thread);
 514 #endif
 515   static void set_priority(Thread* thread, ThreadPriority priority);
 516   static ThreadPriority get_priority(const Thread* const thread);
 517   static void start(Thread* thread);
 518   static void interrupt(Thread* thr);
 519   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 520 
 521   void set_native_thread_name(const char *name) {
 522     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 523     os::set_native_thread_name(name);
 524   }
 525 

 526   Monitor* SR_lock() const                       { return _SR_lock; }
 527 
 528   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 529 
 530   inline void set_suspend_flag(SuspendFlags f);
 531   inline void clear_suspend_flag(SuspendFlags f);
 532 
 533   inline void set_has_async_exception();
 534   inline void clear_has_async_exception();
 535 
 536   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 537 
 538   inline void set_critical_native_unlock();
 539   inline void clear_critical_native_unlock();
 540 
 541   inline void set_trace_flag();
 542   inline void clear_trace_flag();
 543 
 544   // Support for Unhandled Oop detection
 545   // Add the field for both, fastdebug and debug, builds to keep

