36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "utilities/dtrace.hpp"
41 #include "utilities/events.hpp"
42 #include "utilities/preserveException.hpp"
43 #ifdef TARGET_OS_FAMILY_linux
44 # include "os_linux.inline.hpp"
45 #endif
46 #ifdef TARGET_OS_FAMILY_solaris
47 # include "os_solaris.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_FAMILY_windows
50 # include "os_windows.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_FAMILY_bsd
53 # include "os_bsd.inline.hpp"
54 #endif
55
56 #if defined(__GNUC__)
57 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
58 #define ATTR __attribute__((noinline))
59 #else
60 #define ATTR
61 #endif
62
63 // The "core" versions of monitor enter and exit reside in this file.
64 // The interpreter and compilers contain specialized transliterated
65 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
66 // for instance. If you make changes here, make sure to modify the
67 // interpreter, and both C1 and C2 fast-path inline locking code emission.
68 //
69 //
70 // -----------------------------------------------------------------------------
71
72 #ifdef DTRACE_ENABLED
73
74 // Only bother with this argument setup if dtrace is available
75 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
76
77 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
78 char* bytes = NULL; \
79 int len = 0; \
80 jlong jtid = SharedRuntime::get_java_tid(thread); \
81 Symbol* klassname = ((oop)(obj))->klass()->name(); \
82 if (klassname != NULL) { \
924 ::fflush(stdout) ;
925 }
926 }
927 }
928 /* Too slow for general assert or debug
929 void ObjectSynchronizer::verifyInUse (Thread *Self) {
930 ObjectMonitor* mid;
931 int inusetally = 0;
932 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
933 inusetally ++;
934 }
935 assert(inusetally == Self->omInUseCount, "inuse count off");
936
937 int freetally = 0;
938 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
939 freetally ++;
940 }
941 assert(freetally == Self->omFreeCount, "free count off");
942 }
943 */
944 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
945 // A large MAXPRIVATE value reduces both list lock contention
946 // and list coherency traffic, but also tends to increase the
947 // number of objectMonitors in circulation as well as the STW
948 // scavenge costs. As usual, we lean toward time in space-time
949 // tradeoffs.
950 const int MAXPRIVATE = 1024 ;
951 for (;;) {
952 ObjectMonitor * m ;
953
954 // 1: try to allocate from the thread's local omFreeList.
955 // Threads will attempt to allocate first from their local list, then
956 // from the global list, and only after those attempts fail will the thread
957 // attempt to instantiate new monitors. Thread-local free lists take
958 // heat off the ListLock and improve allocation latency, as well as reducing
959 // coherency traffic on the shared global list.
960 m = Self->omFreeList ;
961 if (m != NULL) {
962 Self->omFreeList = m->FreeNext ;
963 Self->omFreeCount -- ;
964 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1172 Thread::muxRelease (&ListLock) ;
1173 TEVENT (omFlush) ;
1174 }
1175
1176 // Fast path code shared by multiple functions
1177 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1178 markOop mark = obj->mark();
1179 if (mark->has_monitor()) {
1180 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1181 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1182 return mark->monitor();
1183 }
1184 return ObjectSynchronizer::inflate(Thread::current(), obj);
1185 }
1186
1187
// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same cache line. Padding might be appropriate.
1190
1191
1192 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
1193 // Inflate mutates the heap ...
1194 // Relaxing assertion for bug 6320749.
1195 assert (Universe::verify_in_progress() ||
1196 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1197
1198 for (;;) {
1199 const markOop mark = object->mark() ;
1200 assert (!mark->has_bias_pattern(), "invariant") ;
1201
1202 // The mark can be in one of the following states:
1203 // * Inflated - just return
1204 // * Stack-locked - coerce it to inflated
1205 // * INFLATING - busy wait for conversion to complete
1206 // * Neutral - aggressively inflate the object.
1207 // * BIASED - Illegal. We should never see this
1208
1209 // CASE: inflated
1210 if (mark->has_monitor()) {
1211 ObjectMonitor * inf = mark->monitor() ;
1212 assert (inf->header()->is_neutral(), "invariant");
|
36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "utilities/dtrace.hpp"
41 #include "utilities/events.hpp"
42 #include "utilities/preserveException.hpp"
43 #ifdef TARGET_OS_FAMILY_linux
44 # include "os_linux.inline.hpp"
45 #endif
46 #ifdef TARGET_OS_FAMILY_solaris
47 # include "os_solaris.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_FAMILY_windows
50 # include "os_windows.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_FAMILY_bsd
53 # include "os_bsd.inline.hpp"
54 #endif
55
56 // The "core" versions of monitor enter and exit reside in this file.
57 // The interpreter and compilers contain specialized transliterated
58 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
59 // for instance. If you make changes here, make sure to modify the
60 // interpreter, and both C1 and C2 fast-path inline locking code emission.
61 //
62 //
63 // -----------------------------------------------------------------------------
64
65 #ifdef DTRACE_ENABLED
66
67 // Only bother with this argument setup if dtrace is available
68 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
69
70 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
71 char* bytes = NULL; \
72 int len = 0; \
73 jlong jtid = SharedRuntime::get_java_tid(thread); \
74 Symbol* klassname = ((oop)(obj))->klass()->name(); \
75 if (klassname != NULL) { \
917 ::fflush(stdout) ;
918 }
919 }
920 }
921 /* Too slow for general assert or debug
922 void ObjectSynchronizer::verifyInUse (Thread *Self) {
923 ObjectMonitor* mid;
924 int inusetally = 0;
925 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
926 inusetally ++;
927 }
928 assert(inusetally == Self->omInUseCount, "inuse count off");
929
930 int freetally = 0;
931 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
932 freetally ++;
933 }
934 assert(freetally == Self->omFreeCount, "free count off");
935 }
936 */
937 ObjectMonitor * ObjectSynchronizer::omAlloc (Thread * Self) {
938 // A large MAXPRIVATE value reduces both list lock contention
939 // and list coherency traffic, but also tends to increase the
940 // number of objectMonitors in circulation as well as the STW
941 // scavenge costs. As usual, we lean toward time in space-time
942 // tradeoffs.
943 const int MAXPRIVATE = 1024 ;
944 for (;;) {
945 ObjectMonitor * m ;
946
947 // 1: try to allocate from the thread's local omFreeList.
948 // Threads will attempt to allocate first from their local list, then
949 // from the global list, and only after those attempts fail will the thread
950 // attempt to instantiate new monitors. Thread-local free lists take
951 // heat off the ListLock and improve allocation latency, as well as reducing
952 // coherency traffic on the shared global list.
953 m = Self->omFreeList ;
954 if (m != NULL) {
955 Self->omFreeList = m->FreeNext ;
956 Self->omFreeCount -- ;
957 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1165 Thread::muxRelease (&ListLock) ;
1166 TEVENT (omFlush) ;
1167 }
1168
1169 // Fast path code shared by multiple functions
1170 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1171 markOop mark = obj->mark();
1172 if (mark->has_monitor()) {
1173 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1174 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1175 return mark->monitor();
1176 }
1177 return ObjectSynchronizer::inflate(Thread::current(), obj);
1178 }
1179
1180
// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same cache line. Padding might be appropriate.
1183
1184
1185 ObjectMonitor * ObjectSynchronizer::inflate (Thread * Self, oop object) {
1186 // Inflate mutates the heap ...
1187 // Relaxing assertion for bug 6320749.
1188 assert (Universe::verify_in_progress() ||
1189 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1190
1191 for (;;) {
1192 const markOop mark = object->mark() ;
1193 assert (!mark->has_bias_pattern(), "invariant") ;
1194
1195 // The mark can be in one of the following states:
1196 // * Inflated - just return
1197 // * Stack-locked - coerce it to inflated
1198 // * INFLATING - busy wait for conversion to complete
1199 // * Neutral - aggressively inflate the object.
1200 // * BIASED - Illegal. We should never see this
1201
1202 // CASE: inflated
1203 if (mark->has_monitor()) {
1204 ObjectMonitor * inf = mark->monitor() ;
1205 assert (inf->header()->is_neutral(), "invariant");
|