
src/hotspot/share/runtime/synchronizer.hpp

rev 57232 : v2.00 -> v2.08 (CR8/v2.08/11-for-jdk14) patches combined into one; merge with jdk-14+25 snapshot; merge with jdk-14+26 snapshot.
rev 57233 : See CR8-to-CR9-changes; merge with 8230876.patch (2019.11.15); merge with jdk-14+25 snapshot; fuzzy merge with jdk-14+26 snapshot.

*** 30,48 ****
  #include "runtime/basicLock.hpp"
  #include "runtime/handles.hpp"
  #include "runtime/perfData.hpp"
  
  class ObjectMonitor;
  class ThreadsList;
  
! typedef PaddedEnd<ObjectMonitor, DEFAULT_CACHE_LINE_SIZE> PaddedObjectMonitor;
  
  struct DeflateMonitorCounters {
!   int n_in_use;              // currently associated with objects
!   int n_in_circulation;      // extant
!   int n_scavenged;           // reclaimed (global and per-thread)
!   int per_thread_scavenged;  // per-thread scavenge total
    double per_thread_times;   // per-thread scavenge times
  };
  
  class ObjectSynchronizer : AllStatic {
    friend class VMStructs;
--- 30,55 ----
  #include "runtime/basicLock.hpp"
  #include "runtime/handles.hpp"
  #include "runtime/perfData.hpp"
  
  class ObjectMonitor;
+ class ObjectMonitorHandle;
  class ThreadsList;
  
! #ifndef OM_CACHE_LINE_SIZE
! // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
! // the current build platform.
! #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
! #endif
! 
! typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;
  
  struct DeflateMonitorCounters {
!   volatile int n_in_use;              // currently associated with objects
!   volatile int n_in_circulation;      // extant
!   volatile int n_scavenged;           // reclaimed (global and per-thread)
!   volatile int per_thread_scavenged;  // per-thread scavenge total
    double per_thread_times;            // per-thread scavenge times
  };
  
  class ObjectSynchronizer : AllStatic {
    friend class VMStructs;
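
The new OM_CACHE_LINE_SIZE / PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> typedef keeps each ObjectMonitor in a block on its own cache line, so threads working on neighboring monitors do not false-share. The following is a minimal standalone sketch of that padding idea only; CACHE_LINE, Counter, and PaddedElem are illustrative stand-ins and not the HotSpot PaddedEnd machinery:

    #include <atomic>
    #include <cstddef>

    static const std::size_t CACHE_LINE = 64;   // stand-in for OM_CACHE_LINE_SIZE

    struct Counter {
      std::atomic<long> value{0};
    };

    // alignas() rounds sizeof(PaddedElem<T>) up to a multiple of ALIGN, so
    // consecutive array elements start on distinct cache lines and threads
    // updating neighboring elements do not false-share.
    template <typename T, std::size_t ALIGN = CACHE_LINE>
    struct alignas(ALIGN) PaddedElem : public T {};

    // Example: eight per-thread slots, each occupying its own cache line.
    static PaddedElem<Counter> slots[8];

    static_assert(sizeof(PaddedElem<Counter>) % CACHE_LINE == 0,
                  "each element is padded to a cache-line multiple");

The same effect can be had with explicit pad bytes after the element; alignas is simply the most compact way to express it in a standalone example.
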
*** 94,112 ****
    // with original recursion count
    static intx complete_exit(Handle obj, TRAPS);
    static void reenter (Handle obj, intx recursions, TRAPS);
  
    // thread-specific and global ObjectMonitor free list accessors
!   static ObjectMonitor* om_alloc(Thread* self);
    static void om_release(Thread* self, ObjectMonitor* m,
                           bool FromPerThreadAlloc);
    static void om_flush(Thread* self);
  
    // Inflate light weight monitor to heavy weight monitor
!   static ObjectMonitor* inflate(Thread* self, oop obj, const InflateCause cause);
    // This version is only for internal use
!   static void inflate_helper(oop obj);
    static const char* inflate_cause_name(const InflateCause cause);
  
    // Returns the identity hash value for an oop
    // NOTE: It may cause monitor inflation
    static intptr_t identity_hash_value_for(Handle obj);
--- 101,120 ----
    // with original recursion count
    static intx complete_exit(Handle obj, TRAPS);
    static void reenter (Handle obj, intx recursions, TRAPS);
  
    // thread-specific and global ObjectMonitor free list accessors
!   static ObjectMonitor* om_alloc(Thread* self, const InflateCause cause);
    static void om_release(Thread* self, ObjectMonitor* m,
                           bool FromPerThreadAlloc);
    static void om_flush(Thread* self);
  
    // Inflate light weight monitor to heavy weight monitor
!   static void inflate(ObjectMonitorHandle* omh_p, Thread* self, oop obj,
!                       const InflateCause cause);
    // This version is only for internal use
!   static void inflate_helper(ObjectMonitorHandle* omh_p, oop obj);
    static const char* inflate_cause_name(const InflateCause cause);
  
    // Returns the identity hash value for an oop
    // NOTE: It may cause monitor inflation
    static intptr_t identity_hash_value_for(Handle obj);
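
inflate() and inflate_helper() now fill in an ObjectMonitorHandle instead of returning a bare ObjectMonitor*, so the caller holds the monitor through a handle for as long as it needs it. As a rough illustration of the shape such a handle can take, here is a hedged RAII sketch that pins a monitor by bumping a reference count; Monitor, _ref_count, and MonitorHandle are assumed names for the example and not the actual ObjectMonitorHandle implementation:

    #include <atomic>

    struct Monitor {
      std::atomic<int> _ref_count{0};
      // owner, recursions, wait set, etc. elided for the sketch
    };

    class MonitorHandle {
      Monitor* _mon;
     public:
      MonitorHandle() : _mon(nullptr) {}
      ~MonitorHandle() { unset(); }

      // The producer (an inflate()-style function) fills the handle instead of
      // returning a bare pointer; the increment tells a concurrent deflater
      // that the monitor is in use.
      void set(Monitor* m) {
        unset();
        _mon = m;
        _mon->_ref_count.fetch_add(1, std::memory_order_acq_rel);
      }

      // Drop the pin; safe to call more than once.
      void unset() {
        if (_mon != nullptr) {
          _mon->_ref_count.fetch_sub(1, std::memory_order_acq_rel);
          _mon = nullptr;
        }
      }

      Monitor* monitor() const { return _mon; }

      MonitorHandle(const MonitorHandle&) = delete;
      MonitorHandle& operator=(const MonitorHandle&) = delete;
    };
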
*** 124,188 ****
    // GC: we current use aggressive monitor deflation policy
    // Basically we deflate all monitors that are not busy.
    // An adaptive profile-based deflation policy could be used if needed
    static void deflate_idle_monitors(DeflateMonitorCounters* counters);
    static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
    static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
    static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
  
    // For a given monitor list: global or per-thread, deflate idle monitors
    static int deflate_monitor_list(ObjectMonitor** list_p,
                                    ObjectMonitor** free_head_p,
                                    ObjectMonitor** free_tail_p);
    static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                                ObjectMonitor** free_head_p,
                                ObjectMonitor** free_tail_p);
!   static bool is_cleanup_needed();
    static bool needs_monitor_scavenge();
    static void oops_do(OopClosure* f);
    // Process oops in thread local used monitors
    static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
  
    // debugging
    static void audit_and_print_stats(bool on_exit);
    static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                               outputStream * out, int *error_cnt_p);
    static void chk_global_free_list_and_count(outputStream * out,
                                               int *error_cnt_p);
    static void chk_global_in_use_list_and_count(outputStream * out,
                                                 int *error_cnt_p);
    static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                 outputStream * out, int *error_cnt_p);
    static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                     outputStream * out,
                                                     int *error_cnt_p);
    static void chk_per_thread_free_list_and_count(JavaThread *jt,
                                                   outputStream * out,
                                                   int *error_cnt_p);
!   static void log_in_use_monitor_details(outputStream * out, bool on_exit);
    static int log_monitor_list_counts(outputStream * out);
    static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
  
   private:
    friend class SynchronizerTest;
  
    enum { _BLOCKSIZE = 128 };
  
    // global list of blocks of monitors
!   static PaddedObjectMonitor* volatile g_block_list;
!   // global monitor free list
!   static ObjectMonitor* volatile g_free_list;
!   // global monitor in-use list, for moribund threads,
!   // monitors they inflated need to be scanned for deflation
!   static ObjectMonitor* volatile g_om_in_use_list;
!   // count of entries in g_om_in_use_list
!   static int g_om_in_use_count;
  
    // Process oops in all global used monitors (i.e. moribund thread's monitors)
    static void global_used_oops_do(OopClosure* f);
    // Process oops in monitors on the given list
!   static void list_oops_do(ObjectMonitor* list, OopClosure* f);
  
    // Support for SynchronizerTest access to GVars fields:
    static u_char* get_gvars_addr();
    static u_char* get_gvars_hc_sequence_addr();
    static size_t get_gvars_size();
--- 132,220 ----
    // GC: we current use aggressive monitor deflation policy
    // Basically we deflate all monitors that are not busy.
    // An adaptive profile-based deflation policy could be used if needed
    static void deflate_idle_monitors(DeflateMonitorCounters* counters);
+   static void deflate_idle_monitors_using_JT();
+   static void deflate_global_idle_monitors_using_JT();
+   static void deflate_per_thread_idle_monitors_using_JT(JavaThread* target);
+   static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target);
    static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
    static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
    static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
  
    // For a given monitor list: global or per-thread, deflate idle monitors
    static int deflate_monitor_list(ObjectMonitor** list_p,
+                                   int* count_p,
                                    ObjectMonitor** free_head_p,
                                    ObjectMonitor** free_tail_p);
+   // For a given in-use monitor list: global or per-thread, deflate idle
+   // monitors using a JavaThread.
+   static int deflate_monitor_list_using_JT(ObjectMonitor** list_p,
+                                            int* count_p,
+                                            ObjectMonitor** free_head_p,
+                                            ObjectMonitor** free_tail_p,
+                                            ObjectMonitor** saved_mid_in_use_p);
    static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                                ObjectMonitor** free_head_p,
                                ObjectMonitor** free_tail_p);
!   static bool deflate_monitor_using_JT(ObjectMonitor* mid,
!                                        ObjectMonitor** free_head_p,
!                                        ObjectMonitor** free_tail_p);
!   static bool is_async_deflation_needed();
    static bool needs_monitor_scavenge();
+   static bool is_safepoint_deflation_needed();
+   static bool is_async_deflation_requested() { return _is_async_deflation_requested; }
+   static bool is_special_deflation_requested() { return _is_special_deflation_requested; }
+   static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
+   static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; }
+   static jlong time_since_last_async_deflation_ms();
    static void oops_do(OopClosure* f);
    // Process oops in thread local used monitors
    static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
  
    // debugging
    static void audit_and_print_stats(bool on_exit);
    static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                               outputStream * out, int *error_cnt_p);
    static void chk_global_free_list_and_count(outputStream * out,
                                               int *error_cnt_p);
+   static void chk_global_wait_list_and_count(outputStream * out,
+                                              int *error_cnt_p);
    static void chk_global_in_use_list_and_count(outputStream * out,
                                                 int *error_cnt_p);
    static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                 outputStream * out, int *error_cnt_p);
    static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                     outputStream * out,
                                                     int *error_cnt_p);
    static void chk_per_thread_free_list_and_count(JavaThread *jt,
                                                   outputStream * out,
                                                   int *error_cnt_p);
!   static void log_in_use_monitor_details(outputStream * out);
    static int log_monitor_list_counts(outputStream * out);
    static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
  
+   static void do_safepoint_work(DeflateMonitorCounters* counters);
+ 
   private:
    friend class SynchronizerTest;
  
    enum { _BLOCKSIZE = 128 };
  
    // global list of blocks of monitors
!   static PaddedObjectMonitor* g_block_list;
!   static volatile bool _is_async_deflation_requested;
!   static volatile bool _is_special_deflation_requested;
!   static jlong _last_async_deflation_time_ns;
! 
!   // Function to prepend new blocks to the appropriate lists:
!   static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
  
    // Process oops in all global used monitors (i.e. moribund thread's monitors)
    static void global_used_oops_do(OopClosure* f);
    // Process oops in monitors on the given list
!   static void list_oops_do(ObjectMonitor* list, int count, OopClosure* f);
  
    // Support for SynchronizerTest access to GVars fields:
    static u_char* get_gvars_addr();
    static u_char* get_gvars_hc_sequence_addr();
    static size_t get_gvars_size();
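
The new _is_async_deflation_requested / _is_special_deflation_requested flags, _last_async_deflation_time_ns, and time_since_last_async_deflation_ms() suggest a request-and-poll protocol between the code that wants idle monitors deflated and the thread that does the deflation. A small self-contained model of that pattern follows; the names and the std::chrono clock are assumptions for the example, not HotSpot code:

    #include <atomic>
    #include <chrono>
    #include <cstdint>

    class DeflationRequests {
      static std::atomic<bool>    _async_requested;   // set by requesters, cleared by the worker
      static std::atomic<int64_t> _last_deflation_ns; // when the worker last ran a pass

     public:
      // Any thread that notices deflation is worthwhile sets the flag.
      static void request_async_deflation() {
        _async_requested.store(true, std::memory_order_release);
      }

      // Called by the service thread on each pass of its loop; returns true
      // exactly once per request.
      static bool poll_and_clear() {
        return _async_requested.exchange(false, std::memory_order_acq_rel);
      }

      // Record the completion time of a deflation pass.
      static void record_deflation_pass() {
        auto now = std::chrono::steady_clock::now().time_since_epoch();
        _last_deflation_ns.store(
            std::chrono::duration_cast<std::chrono::nanoseconds>(now).count(),
            std::memory_order_release);
      }

      // Milliseconds since the last recorded pass; useful for a time-based
      // "deflate anyway" heuristic.
      static int64_t time_since_last_deflation_ms() {
        auto now = std::chrono::steady_clock::now().time_since_epoch();
        int64_t now_ns =
            std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
        return (now_ns - _last_deflation_ns.load(std::memory_order_acquire)) / 1000000;
      }
    };

    std::atomic<bool>    DeflationRequests::_async_requested{false};
    std::atomic<int64_t> DeflationRequests::_last_deflation_ns{0};
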