91 // This scheme cannot handle transfers of biases of single objects
92 // from thread to thread efficiently, but it can handle bulk transfers
93 // of such biases, which is a usage pattern showing up in some
94 // applications and benchmarks. We implement "bulk rebias" and "bulk
95 // revoke" operations using a "bias epoch" on a per-data-type basis.
96 // If too many bias revocations are occurring for a particular data
97 // type, the bias epoch for the data type is incremented at a
98 // safepoint, effectively meaning that all previous biases are
99 // invalid. The fast path locking case checks for an invalid epoch in
100 // the object header and attempts to rebias the object with a CAS if
101 // found, avoiding safepoints or bulk heap sweeps (the latter of which
102 // was used in a prior version of this algorithm and did not scale
103 // well). If too many bias revocations persist, biasing is completely
104 // disabled for the data type by resetting the prototype header to the
105 // unbiased markOop. The fast-path locking code checks to see whether
106 // the instance's bias pattern differs from the prototype header's and
107 // causes the bias to be revoked without reaching a safepoint or,
108 // again, a bulk heap sweep.
109
110 // Biased locking counters
111 class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
112 private:
113 int _total_entry_count;
114 int _biased_lock_entry_count;
115 int _anonymously_biased_lock_entry_count;
116 int _rebiased_lock_entry_count;
117 int _revoked_lock_entry_count;
118 int _fast_path_entry_count;
119 int _slow_path_entry_count;
120
121 public:
122 BiasedLockingCounters() :
123 _total_entry_count(0),
124 _biased_lock_entry_count(0),
125 _anonymously_biased_lock_entry_count(0),
126 _rebiased_lock_entry_count(0),
127 _revoked_lock_entry_count(0),
128 _fast_path_entry_count(0),
129 _slow_path_entry_count(0) {}
130
131 int slow_path_entry_count(); // Compute this field if necessary
|
91 // This scheme cannot handle transfers of biases of single objects
92 // from thread to thread efficiently, but it can handle bulk transfers
93 // of such biases, which is a usage pattern showing up in some
94 // applications and benchmarks. We implement "bulk rebias" and "bulk
95 // revoke" operations using a "bias epoch" on a per-data-type basis.
96 // If too many bias revocations are occurring for a particular data
97 // type, the bias epoch for the data type is incremented at a
98 // safepoint, effectively meaning that all previous biases are
99 // invalid. The fast path locking case checks for an invalid epoch in
100 // the object header and attempts to rebias the object with a CAS if
101 // found, avoiding safepoints or bulk heap sweeps (the latter of which
102 // was used in a prior version of this algorithm and did not scale
103 // well). If too many bias revocations persist, biasing is completely
104 // disabled for the data type by resetting the prototype header to the
105 // unbiased markOop. The fast-path locking code checks to see whether
106 // the instance's bias pattern differs from the prototype header's and
107 // causes the bias to be revoked without reaching a safepoint or,
108 // again, a bulk heap sweep.
109
110 // Biased locking counters
111 class BiasedLockingCounters {
112 private:
113 int _total_entry_count;
114 int _biased_lock_entry_count;
115 int _anonymously_biased_lock_entry_count;
116 int _rebiased_lock_entry_count;
117 int _revoked_lock_entry_count;
118 int _fast_path_entry_count;
119 int _slow_path_entry_count;
120
121 public:
122 BiasedLockingCounters() :
123 _total_entry_count(0),
124 _biased_lock_entry_count(0),
125 _anonymously_biased_lock_entry_count(0),
126 _rebiased_lock_entry_count(0),
127 _revoked_lock_entry_count(0),
128 _fast_path_entry_count(0),
129 _slow_path_entry_count(0) {}
130
131 int slow_path_entry_count(); // Compute this field if necessary
|