26 #define SHARE_RUNTIME_SYNCHRONIZER_HPP
27
28 #include "memory/padded.hpp"
29 #include "oops/markWord.hpp"
30 #include "runtime/basicLock.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/perfData.hpp"
33
34 class ObjectMonitor;
35 class ThreadsList;
36
37 #ifndef OM_CACHE_LINE_SIZE
38 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
39 // the current build platform.
40 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
41 #endif
42
43 typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;
44
// Counters collected while deflating idle ObjectMonitors.
// The int counters are declared volatile for consistency with the other
// definition of this struct later in this file: they are updated from
// per-thread deflation passes as well as the global pass, so a plain int
// could be cached/stale when read from another context.
// TODO(review): confirm against the .cpp that update sites use proper
// atomic operations where increments can race.
struct DeflateMonitorCounters {
  volatile int n_in_use;             // currently associated with objects
  volatile int n_in_circulation;     // extant
  volatile int n_scavenged;          // reclaimed (global and per-thread)
  volatile int per_thread_scavenged; // per-thread scavenge total
  double per_thread_times;           // per-thread scavenge times
};
52
53 class ObjectSynchronizer : AllStatic {
54 friend class VMStructs;
55 public:
// How the querying thread relates to an object's lock
// (returned by query_lock_ownership() below).
56 typedef enum {
57 owner_self,
58 owner_none,
59 owner_other
60 } LockOwnership;
61
// Why a stack-lock was inflated to a full ObjectMonitor.
// NOTE(review): the listing jumps from line 69 to line 115 just below;
// the tail of this enum (including its typedef name) and the intervening
// declarations are not shown here.
62 typedef enum {
63 inflate_cause_vm_internal = 0,
64 inflate_cause_monitor_enter = 1,
65 inflate_cause_wait = 2,
66 inflate_cause_notify = 3,
67 inflate_cause_hash_code = 4,
68 inflate_cause_jni_enter = 5,
69 inflate_cause_jni_exit = 6,
115
116 // Returns the identity hash value for an oop
117 // NOTE: It may cause monitor inflation
118 static intptr_t identity_hash_value_for(Handle obj);
119 static intptr_t FastHashCode(Thread* self, oop obj);
120
121 // java.lang.Thread support
122 static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
123 static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);
124
125 static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);
126
127 // JNI detach support
128 static void release_monitors_owned_by_thread(TRAPS);
129 static void monitors_iterate(MonitorClosure* m);
130
131 // GC: we currently use an aggressive monitor deflation policy.
132 // Basically we deflate all monitors that are not busy.
133 // An adaptive profile-based deflation policy could be used if needed
134 static void deflate_idle_monitors(DeflateMonitorCounters* counters);
135 static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
136 static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
137 static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
138
139 // For a given monitor list: global or per-thread, deflate idle monitors
140 static int deflate_monitor_list(ObjectMonitor** list_p,
141 int* count_p,
142 ObjectMonitor** free_head_p,
143 ObjectMonitor** free_tail_p);
144 static bool deflate_monitor(ObjectMonitor* mid, oop obj,
145 ObjectMonitor** free_head_p,
146 ObjectMonitor** free_tail_p);
147 static bool is_cleanup_needed();
148 static bool needs_monitor_scavenge();
149 static void oops_do(OopClosure* f);
150 // Process oops in thread local used monitors
151 static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
152
// Debugging: consistency checkers for the monitor free/in-use lists; each
// reports via 'out' and bumps *error_cnt_p on a detected inconsistency.
153 // debugging
154 static void audit_and_print_stats(bool on_exit);
155 static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
156 outputStream * out, int *error_cnt_p);
157 static void chk_global_free_list_and_count(outputStream * out,
158 int *error_cnt_p);
159 static void chk_global_in_use_list_and_count(outputStream * out,
160 int *error_cnt_p);
161 static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
162 outputStream * out, int *error_cnt_p);
163 static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
164 outputStream * out,
165 int *error_cnt_p);
166 static void chk_per_thread_free_list_and_count(JavaThread *jt,
167 outputStream * out,
168 int *error_cnt_p);
169 static void log_in_use_monitor_details(outputStream * out);
170 static int log_monitor_list_counts(outputStream * out);
171 static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
172
173 private:
174 friend class SynchronizerTest;
175
// Monitors are allocated in blocks of _BLOCKSIZE padded ObjectMonitors.
176 enum { _BLOCKSIZE = 128 };
177 // global list of blocks of monitors
178 static PaddedObjectMonitor* g_block_list;
179
180 // Function to prepend new blocks to the appropriate lists:
181 static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
182
183 // Process oops in all global used monitors (i.e. moribund thread's monitors)
184 static void global_used_oops_do(OopClosure* f);
185 // Process oops in monitors on the given list
186 static void list_oops_do(ObjectMonitor* list, OopClosure* f);
187
188 // Support for SynchronizerTest access to GVars fields:
189 static u_char* get_gvars_addr();
190 static u_char* get_gvars_hc_sequence_addr();
191 static size_t get_gvars_size();
192 static u_char* get_gvars_stw_random_addr();
193 };
194
195 // ObjectLocker enforces balanced locking and can never throw an
196 // IllegalMonitorStateException. However, a pending exception may
197 // have to pass through, and we must also be able to deal with
198 // asynchronous exceptions. The caller is responsible for checking
|
26 #define SHARE_RUNTIME_SYNCHRONIZER_HPP
27
28 #include "memory/padded.hpp"
29 #include "oops/markWord.hpp"
30 #include "runtime/basicLock.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/perfData.hpp"
33
34 class ObjectMonitor;
35 class ThreadsList;
36
37 #ifndef OM_CACHE_LINE_SIZE
38 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
39 // the current build platform.
40 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
41 #endif
42
43 typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;
44
// Counters collected while deflating idle ObjectMonitors. The int
// counters are volatile because they are bumped from per-thread passes
// as well as the global pass.
// TODO(review): confirm update sites use atomic ops where increments race.
45 struct DeflateMonitorCounters {
46 volatile int n_in_use; // currently associated with objects
47 volatile int n_in_circulation; // extant
48 volatile int n_scavenged; // reclaimed (global and per-thread)
49 volatile int per_thread_scavenged; // per-thread scavenge total
50 double per_thread_times; // per-thread scavenge times
51 };
52
53 class ObjectSynchronizer : AllStatic {
54 friend class VMStructs;
55 public:
// How the querying thread relates to an object's lock
// (returned by query_lock_ownership() below).
56 typedef enum {
57 owner_self,
58 owner_none,
59 owner_other
60 } LockOwnership;
61
// Why a stack-lock was inflated to a full ObjectMonitor.
// NOTE(review): the listing jumps from line 69 to line 115 just below;
// the tail of this enum (including its typedef name) and the intervening
// declarations are not shown here.
62 typedef enum {
63 inflate_cause_vm_internal = 0,
64 inflate_cause_monitor_enter = 1,
65 inflate_cause_wait = 2,
66 inflate_cause_notify = 3,
67 inflate_cause_hash_code = 4,
68 inflate_cause_jni_enter = 5,
69 inflate_cause_jni_exit = 6,
115
116 // Returns the identity hash value for an oop
117 // NOTE: It may cause monitor inflation
118 static intptr_t identity_hash_value_for(Handle obj);
119 static intptr_t FastHashCode(Thread* self, oop obj);
120
121 // java.lang.Thread support
122 static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
123 static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);
124
125 static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);
126
127 // JNI detach support
128 static void release_monitors_owned_by_thread(TRAPS);
129 static void monitors_iterate(MonitorClosure* m);
130
131 // GC: we currently use an aggressive monitor deflation policy.
132 // Basically we deflate all monitors that are not busy.
133 // An adaptive profile-based deflation policy could be used if needed
134 static void deflate_idle_monitors(DeflateMonitorCounters* counters);
// The *_using_JT entry points deflate idle monitors from a JavaThread
// (i.e. outside a safepoint); 'global' vs 'per-thread' selects which
// in-use list is processed.
135 static void deflate_idle_monitors_using_JT();
136 static void deflate_global_idle_monitors_using_JT();
137 static void deflate_per_thread_idle_monitors_using_JT(JavaThread* target);
138 static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target);
139 static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
140 static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
141 static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
142
143 // For a given monitor list: global or per-thread, deflate idle monitors
144 static int deflate_monitor_list(ObjectMonitor** list_p,
145 int* count_p,
146 ObjectMonitor** free_head_p,
147 ObjectMonitor** free_tail_p);
148 // For a given in-use monitor list: global or per-thread, deflate idle
149 // monitors using a JavaThread.
// saved_mid_in_use_p lets the caller resume a list walk across calls —
// presumably to bound pause time per pass; TODO(review): confirm in .cpp.
150 static int deflate_monitor_list_using_JT(ObjectMonitor** list_p,
151 int* count_p,
152 ObjectMonitor** free_head_p,
153 ObjectMonitor** free_tail_p,
154 ObjectMonitor** saved_mid_in_use_p);
155 static bool deflate_monitor(ObjectMonitor* mid, oop obj,
156 ObjectMonitor** free_head_p,
157 ObjectMonitor** free_tail_p);
158 static bool deflate_monitor_using_JT(ObjectMonitor* mid,
159 ObjectMonitor** free_head_p,
160 ObjectMonitor** free_tail_p);
161 static bool is_async_deflation_needed();
162 static bool needs_monitor_scavenge();
163 static bool is_safepoint_deflation_needed();
// Request flags for async/special deflation; backed by the volatile
// statics declared in the private section below.
164 static bool is_async_deflation_requested() { return _is_async_deflation_requested; }
165 static bool is_special_deflation_requested() { return _is_special_deflation_requested; }
166 static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
167 static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; }
168 static jlong time_since_last_async_deflation_ms();
169 static void oops_do(OopClosure* f);
170 // Process oops in thread local used monitors
171 static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
172
// Debugging: consistency checkers for the monitor free/wait/in-use lists;
// each reports via 'out' and bumps *error_cnt_p on a detected inconsistency.
173 // debugging
174 static void audit_and_print_stats(bool on_exit);
175 static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
176 outputStream * out, int *error_cnt_p);
177 static void chk_global_free_list_and_count(outputStream * out,
178 int *error_cnt_p);
179 static void chk_global_wait_list_and_count(outputStream * out,
180 int *error_cnt_p);
181 static void chk_global_in_use_list_and_count(outputStream * out,
182 int *error_cnt_p);
183 static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
184 outputStream * out, int *error_cnt_p);
185 static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
186 outputStream * out,
187 int *error_cnt_p);
188 static void chk_per_thread_free_list_and_count(JavaThread *jt,
189 outputStream * out,
190 int *error_cnt_p);
191 static void log_in_use_monitor_details(outputStream * out);
192 static int log_monitor_list_counts(outputStream * out);
193 static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
194
195 static void do_safepoint_work(DeflateMonitorCounters* counters);
196
197 private:
198 friend class SynchronizerTest;
199
// Monitors are allocated in blocks of _BLOCKSIZE padded ObjectMonitors.
200 enum { _BLOCKSIZE = 128 };
201 // global list of blocks of monitors
202 static PaddedObjectMonitor* g_block_list;
// volatile: set via the public setters above and polled from other threads.
203 static volatile bool _is_async_deflation_requested;
204 static volatile bool _is_special_deflation_requested;
205 static jlong _last_async_deflation_time_ns;
206
207 // Function to prepend new blocks to the appropriate lists:
208 static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
209
210 // Process oops in all global used monitors (i.e. moribund thread's monitors)
211 static void global_used_oops_do(OopClosure* f);
212 // Process oops in monitors on the given list
213 static void list_oops_do(ObjectMonitor* list, OopClosure* f);
214
215 // Support for SynchronizerTest access to GVars fields:
216 static u_char* get_gvars_addr();
217 static u_char* get_gvars_hc_sequence_addr();
218 static size_t get_gvars_size();
219 static u_char* get_gvars_stw_random_addr();
220 };
221
222 // ObjectLocker enforces balanced locking and can never throw an
223 // IllegalMonitorStateException. However, a pending exception may
224 // have to pass through, and we must also be able to deal with
225 // asynchronous exceptions. The caller is responsible for checking
|