50 //
// Note that we're *not* using word-tearing in the classic sense.
52 // The lock() fast-path will CAS the lockword and the unlock()
53 // fast-path will store into the lock-byte colocated within the lockword.
54 // We depend on the fact that all our reference platforms have
55 // coherent and atomic byte accesses. More precisely, byte stores
56 // interoperate in a safe, sane, and expected manner with respect to
57 // CAS, ST and LDs to the full-word containing the byte.
58 // If you're porting HotSpot to a platform where that isn't the case
// then you'll want to change the unlock() fast path from:
60 // STB;MEMBAR #storeload; LDN
61 // to a full-word CAS of the lockword.
62
63
// A full machine word that also exposes its individual bytes, so the lock
// byte can be addressed separately from the whole lockword: lock() CASes
// FullWord while unlock() stores into the lock byte (see the note above on
// byte-store/full-word-CAS interoperability).
union SplitWord { // full-word with separately addressable LSB
  volatile intptr_t FullWord ;                // whole-word view, target of the lock() CAS
  volatile void * Address ;                   // the same storage viewed as a pointer
  volatile jbyte Bytes [sizeof(intptr_t)] ;   // per-byte view; _LSBINDEX selects the lock byte
} ;
69
// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
// (the LSB is the lock byte that the unlock() fast path stores into).
#ifdef VM_LITTLE_ENDIAN
#define _LSBINDEX 0                      // little-endian: LSB is the first byte
#else
#define _LSBINDEX (sizeof(intptr_t)-1)   // big-endian: LSB is the last byte
#endif
76
class ParkEvent ;   // forward declaration; element type of the _EntryList/_WaitSet lists below
78
79 // See orderAccess.hpp. We assume throughout the VM that mutex lock and
80 // try_lock do fence-lock-acquire, and that unlock does a release-unlock,
81 // *in that order*. If their implementations change such that these
82 // assumptions are violated, a whole lot of code will break.
83
// The default length of monitor name was originally chosen to be 64 to avoid
// false sharing. Now, PaddedMonitor is available for this purpose.
// TODO: Check if _name[MONITOR_NAME_LEN] should better get replaced by const char*.
// Fixed size of the inline _name[] buffer in Monitor below.
static const int MONITOR_NAME_LEN = 64;
88
89 class Monitor : public CHeapObj<mtInternal> {
90
91 public:
92 // A special lock: Is a lock where you are guaranteed not to block while you are
93 // holding it, i.e., no vm operation can happen, taking other (blocking) locks, etc.
94 // The rank 'access' is similar to 'special' and has the same restrictions on usage.
95 // It is reserved for locks that may be required in order to perform memory accesses
// that require special barriers, e.g. SATB GC barriers, that in turn use locks.
111 enum lock_types {
112 event,
113 access = event + 1,
114 special = access + 2,
115 suspend_resume = special + 1,
116 leaf = suspend_resume + 2,
117 safepoint = leaf + 10,
118 barrier = safepoint + 1,
119 nonleaf = barrier + 1,
120 max_nonleaf = nonleaf + 900,
121 native = max_nonleaf + 1
122 };
123
124 // The WaitSet and EntryList linked lists are composed of ParkEvents.
125 // I use ParkEvent instead of threads as ParkEvents are immortal and
126 // type-stable, meaning we can safely unpark() a possibly stale
127 // list element in the unlock()-path.
128
 protected:                              // Monitor-Mutex metadata
  // The lockword: the low-order lock byte and the contention queue (cxq)
  // share this single word (see SplitWord above).
  SplitWord _LockWord ;                  // Contention queue (cxq) colocated with Lock-byte
  enum LockWordBits { _LBIT=1 } ;        // bit set in the lock byte when the lock is held
  Thread * volatile _owner;              // The owner of the lock
                                         // Consider sequestering _owner on its own $line
                                         // to aid future synchronization mechanisms.
  ParkEvent * volatile _EntryList ;      // List of threads waiting for entry
  ParkEvent * volatile _OnDeck ;         // heir-presumptive (presumed next owner)
  volatile intptr_t _WaitLock [1] ;      // Protects _WaitSet
  ParkEvent * volatile _WaitSet ;        // LL (linked list) of ParkEvents of waiting threads
  volatile bool _snuck;                  // Used for sneaky locking (evil).
  int NotifyCount ;                      // diagnostic assist
  char _name[MONITOR_NAME_LEN];          // Name of mutex (fixed-size inline buffer)

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool _allow_vm_block;                  // NOTE(review): presumably whether the VM may block while this lock is held -- confirm
  debug_only(int _rank;)                 // rank (to avoid/detect potential deadlocks)
  debug_only(Monitor * _next;)           // Used by a Thread to link up owned locks
  debug_only(Thread* _last_owner;)       // the last thread to own the lock
  debug_only(static bool contains(Monitor * locks, Monitor * lock);)           // presumably: is 'lock' on the 'locks' list?
  debug_only(static Monitor * get_least_ranked_lock(Monitor * locks);)         // presumably: lowest-ranked lock on 'locks'
  debug_only(Monitor * get_least_ranked_lock_besides_this(Monitor * locks);)   // presumably: lowest-ranked lock on 'locks' other than this
|
50 //
// Note that we're *not* using word-tearing in the classic sense.
52 // The lock() fast-path will CAS the lockword and the unlock()
53 // fast-path will store into the lock-byte colocated within the lockword.
54 // We depend on the fact that all our reference platforms have
55 // coherent and atomic byte accesses. More precisely, byte stores
56 // interoperate in a safe, sane, and expected manner with respect to
57 // CAS, ST and LDs to the full-word containing the byte.
58 // If you're porting HotSpot to a platform where that isn't the case
// then you'll want to change the unlock() fast path from:
60 // STB;MEMBAR #storeload; LDN
61 // to a full-word CAS of the lockword.
62
63
// A full machine word that also exposes its individual bytes, so the lock
// byte can be addressed separately from the whole lockword: lock() CASes
// FullWord while unlock() stores into the lock byte (see the note above on
// byte-store/full-word-CAS interoperability).
union SplitWord { // full-word with separately addressable LSB
  volatile intptr_t FullWord ;                // whole-word view, target of the lock() CAS
  volatile void * Address ;                   // the same storage viewed as a pointer
  volatile jbyte Bytes [sizeof(intptr_t)] ;   // per-byte view; exposes the lock byte
} ;
69
class ParkEvent ;   // forward declaration; element type of the _EntryList/_WaitSet lists below
71
72 // See orderAccess.hpp. We assume throughout the VM that mutex lock and
73 // try_lock do fence-lock-acquire, and that unlock does a release-unlock,
74 // *in that order*. If their implementations change such that these
75 // assumptions are violated, a whole lot of code will break.
76
// The default length of monitor name was originally chosen to be 64 to avoid
// false sharing. Now, PaddedMonitor is available for this purpose.
// TODO: Check if _name[MONITOR_NAME_LEN] should better get replaced by const char*.
// Fixed size of the inline _name[] buffer in Monitor below.
static const int MONITOR_NAME_LEN = 64;
81
82 class Monitor : public CHeapObj<mtInternal> {
83
84 public:
85 // A special lock: Is a lock where you are guaranteed not to block while you are
86 // holding it, i.e., no vm operation can happen, taking other (blocking) locks, etc.
87 // The rank 'access' is similar to 'special' and has the same restrictions on usage.
88 // It is reserved for locks that may be required in order to perform memory accesses
// that require special barriers, e.g. SATB GC barriers, that in turn use locks.
104 enum lock_types {
105 event,
106 access = event + 1,
107 special = access + 2,
108 suspend_resume = special + 1,
109 leaf = suspend_resume + 2,
110 safepoint = leaf + 10,
111 barrier = safepoint + 1,
112 nonleaf = barrier + 1,
113 max_nonleaf = nonleaf + 900,
114 native = max_nonleaf + 1
115 };
116
117 // The WaitSet and EntryList linked lists are composed of ParkEvents.
118 // I use ParkEvent instead of threads as ParkEvents are immortal and
119 // type-stable, meaning we can safely unpark() a possibly stale
120 // list element in the unlock()-path.
121
 protected:                              // Monitor-Mutex metadata
  // The lockword: the low-order lock byte and the contention queue (cxq)
  // share this single word (see SplitWord above).
  SplitWord _LockWord ;                  // Contention queue (cxq) colocated with Lock-byte
  Thread * volatile _owner;              // The owner of the lock
                                         // Consider sequestering _owner on its own $line
                                         // to aid future synchronization mechanisms.
  ParkEvent * volatile _EntryList ;      // List of threads waiting for entry
  ParkEvent * volatile _OnDeck ;         // heir-presumptive (presumed next owner)
  volatile intptr_t _WaitLock [1] ;      // Protects _WaitSet
  ParkEvent * volatile _WaitSet ;        // LL (linked list) of ParkEvents of waiting threads
  volatile bool _snuck;                  // Used for sneaky locking (evil).
  int NotifyCount ;                      // diagnostic assist
  char _name[MONITOR_NAME_LEN];          // Name of mutex (fixed-size inline buffer)

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool _allow_vm_block;                  // NOTE(review): presumably whether the VM may block while this lock is held -- confirm
  debug_only(int _rank;)                 // rank (to avoid/detect potential deadlocks)
  debug_only(Monitor * _next;)           // Used by a Thread to link up owned locks
  debug_only(Thread* _last_owner;)       // the last thread to own the lock
  debug_only(static bool contains(Monitor * locks, Monitor * lock);)           // presumably: is 'lock' on the 'locks' list?
  debug_only(static Monitor * get_least_ranked_lock(Monitor * locks);)         // presumably: lowest-ranked lock on 'locks'
  debug_only(Monitor * get_least_ranked_lock_besides_this(Monitor * locks);)   // presumably: lowest-ranked lock on 'locks' other than this
|