< prev index next >

src/hotspot/share/runtime/thread.hpp

Print this page
rev 56044 : imported patch 8230184.patch
rev 56046 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.
rev 56048 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can be experimented with independently of DEFAULT_CACHE_LINE_SIZE; for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE, we are experimenting with 64; move _previous_owner_tid and _allocation_state fields to share the cache line with ObjectMonitor::_header; put ObjectMonitor::_ref_count on its own cache line after _owner; add 'int* count_p' parameter to deflate_monitor_list() and deflate_monitor_list_using_JT() and push counter updates down to where the ObjectMonitors are actually removed from the in-use lists; monitors_iterate() async deflation check should use negative ref_count; add 'JavaThread* target' param to deflate_per_thread_idle_monitors_using_JT(); add deflate_common_idle_monitors_using_JT() to make it clear which JavaThread* is the target of the work and which is the calling JavaThread* (self); g_free_list, g_om_in_use_list and g_om_in_use_count are now static to synchronizer.cpp (reduce scope); add more diagnostic info to some assert()'s; minor code cleanups and code motion; save_om_ptr() should detect a race with a deflating thread that is bailing out and cause a retry when the ref_count field is not positive; merge with jdk-14+11; add special GC support for TestHumongousClassLoader.java; merge with 8230184.patch.
rev 56049 : Merge the remainder of the lock-free monitor list changes from v2.06 with v2.06a and v2.06b after running the changes through the edit scripts; merge pieces from dcubed.monitor_deflate_conc.v2.06d in dcubed.monitor_deflate_conc.v2.06[ac]; merge pieces from dcubed.monitor_deflate_conc.v2.06e into dcubed.monitor_deflate_conc.v2.06c; merge with jdk-14+11; test workaround for test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java should not be needed anymore.


 393   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
 394                                                 // the Java heap
 395   ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.
 396 
 397   ThreadStatisticalInfo _statistical_info;      // Statistics about the thread
 398 
 399   JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr
 400 
 401   int   _vm_operation_started_count;            // VM_Operation support
 402   int   _vm_operation_completed_count;          // VM_Operation support
 403 
 404   ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
 405                                                 // is waiting to lock
 406   bool _current_pending_monitor_is_from_java;   // locking is from Java code
 407 
 408   // ObjectMonitor on which this thread called Object.wait()
 409   ObjectMonitor* _current_waiting_monitor;
 410 
 411   // Per-thread ObjectMonitor lists:
 412  public:
 413   ObjectMonitor* om_free_list;                  // SLL of free ObjectMonitors
 414   int om_free_count;                            // # on om_free_list
 415   int om_free_provision;                        // # to try to allocate next
 416   ObjectMonitor* om_in_use_list;                // SLL of in-use ObjectMonitors
 417   int om_in_use_count;                          // # on om_in_use_list
 418   volatile bool om_request_deflation;           // request deflation of idle monitors
 419 
 420 #ifdef ASSERT
 421  private:
 422   volatile uint64_t _visited_for_critical_count;
 423 
 424  public:
 425   void set_visited_for_critical_count(uint64_t safepoint_id) {  // Stamp this thread as visited at the given safepoint id; must currently be unset.
 426     assert(_visited_for_critical_count == 0, "Must be reset before set");
 427     assert((safepoint_id & 0x1) == 1, "Must be odd");  // NOTE(review): callers apparently pass odd-valued safepoint ids — confirm
 428     _visited_for_critical_count = safepoint_id;
 429   }
 430   void reset_visited_for_critical_count(uint64_t safepoint_id) {  // Clear the visited stamp; caller must pass the same id that set it.
 431     assert(_visited_for_critical_count == safepoint_id, "Was not visited");
 432     _visited_for_critical_count = 0;
 433   }
 434   bool was_visited_for_critical_count(uint64_t safepoint_id) const {  // True iff this thread was stamped with exactly this safepoint id.
 435     return _visited_for_critical_count == safepoint_id;
 436   }
 437 #endif
 438 


 506   static inline Thread* current_or_null();
 507   // Returns the current thread, or NULL if not attached, and is
 508   // safe for use from signal-handlers
 509   static inline Thread* current_or_null_safe();
 510 
 511   // Common thread operations
 512 #ifdef ASSERT
 513   static void check_for_dangling_thread_pointer(Thread *thread);
 514 #endif
 515   static void set_priority(Thread* thread, ThreadPriority priority);
 516   static ThreadPriority get_priority(const Thread* const thread);
 517   static void start(Thread* thread);
 518   static void interrupt(Thread* thr);
 519   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 520 
 521   void set_native_thread_name(const char *name) {  // Set the OS-level thread name; only legal when called on the current thread.
 522     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 523     os::set_native_thread_name(name);
 524   }
 525 
 526   ObjectMonitor** om_in_use_list_addr()          { return (ObjectMonitor **)&om_in_use_list; }  // Address of the per-thread in-use ObjectMonitor list head.
 527   Monitor* SR_lock() const                       { return _SR_lock; }  // Accessor for this thread's _SR_lock Monitor.
 528 
 529   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }  // Tests the _has_async_exception bit in _suspend_flags.
 530 
 531   inline void set_suspend_flag(SuspendFlags f);
 532   inline void clear_suspend_flag(SuspendFlags f);
 533 
 534   inline void set_has_async_exception();
 535   inline void clear_has_async_exception();
 536 
 537   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }  // Tests the _critical_native_unlock bit in _suspend_flags.
 538 
 539   inline void set_critical_native_unlock();
 540   inline void clear_critical_native_unlock();
 541 
 542   inline void set_trace_flag();
 543   inline void clear_trace_flag();
 544 
 545   // Support for Unhandled Oop detection
 546   // Add the field for both, fastdebug and debug, builds to keep




 393   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
 394                                                 // the Java heap
 395   ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.
 396 
 397   ThreadStatisticalInfo _statistical_info;      // Statistics about the thread
 398 
 399   JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr
 400 
 401   int   _vm_operation_started_count;            // VM_Operation support
 402   int   _vm_operation_completed_count;          // VM_Operation support
 403 
 404   ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
 405                                                 // is waiting to lock
 406   bool _current_pending_monitor_is_from_java;   // locking is from Java code
 407 
 408   // ObjectMonitor on which this thread called Object.wait()
 409   ObjectMonitor* _current_waiting_monitor;
 410 
 411   // Per-thread ObjectMonitor lists:
 412  public:
 413   ObjectMonitor* volatile om_free_list;         // SLL of free ObjectMonitors
 414   volatile int om_free_count;                   // # on om_free_list
 415   int om_free_provision;                        // # to try to allocate next
 416   ObjectMonitor* volatile om_in_use_list;       // SLL of in-use ObjectMonitors
 417   volatile int om_in_use_count;                 // # on om_in_use_list

 418 
 419 #ifdef ASSERT
 420  private:
 421   volatile uint64_t _visited_for_critical_count;
 422 
 423  public:
 424   void set_visited_for_critical_count(uint64_t safepoint_id) {  // Stamp this thread as visited at the given safepoint id; must currently be unset.
 425     assert(_visited_for_critical_count == 0, "Must be reset before set");
 426     assert((safepoint_id & 0x1) == 1, "Must be odd");  // NOTE(review): callers apparently pass odd-valued safepoint ids — confirm
 427     _visited_for_critical_count = safepoint_id;
 428   }
 429   void reset_visited_for_critical_count(uint64_t safepoint_id) {  // Clear the visited stamp; caller must pass the same id that set it.
 430     assert(_visited_for_critical_count == safepoint_id, "Was not visited");
 431     _visited_for_critical_count = 0;
 432   }
 433   bool was_visited_for_critical_count(uint64_t safepoint_id) const {  // True iff this thread was stamped with exactly this safepoint id.
 434     return _visited_for_critical_count == safepoint_id;
 435   }
 436 #endif
 437 


 505   static inline Thread* current_or_null();
 506   // Returns the current thread, or NULL if not attached, and is
 507   // safe for use from signal-handlers
 508   static inline Thread* current_or_null_safe();
 509 
 510   // Common thread operations
 511 #ifdef ASSERT
 512   static void check_for_dangling_thread_pointer(Thread *thread);
 513 #endif
 514   static void set_priority(Thread* thread, ThreadPriority priority);
 515   static ThreadPriority get_priority(const Thread* const thread);
 516   static void start(Thread* thread);
 517   static void interrupt(Thread* thr);
 518   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 519 
 520   void set_native_thread_name(const char *name) {  // Set the OS-level thread name; only legal when called on the current thread.
 521     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 522     os::set_native_thread_name(name);
 523   }
 524 

 525   Monitor* SR_lock() const                       { return _SR_lock; }  // Accessor for this thread's _SR_lock Monitor.
 526 
 527   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }  // Tests the _has_async_exception bit in _suspend_flags.
 528 
 529   inline void set_suspend_flag(SuspendFlags f);
 530   inline void clear_suspend_flag(SuspendFlags f);
 531 
 532   inline void set_has_async_exception();
 533   inline void clear_has_async_exception();
 534 
 535   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }  // Tests the _critical_native_unlock bit in _suspend_flags.
 536 
 537   inline void set_critical_native_unlock();
 538   inline void clear_critical_native_unlock();
 539 
 540   inline void set_trace_flag();
 541   inline void clear_trace_flag();
 542 
 543   // Support for Unhandled Oop detection
 544   // Add the field for both, fastdebug and debug, builds to keep


< prev index next >