src/hotspot/share/oops/markOop.hpp
@@ -36,17 +36,19 @@
//
// 32 bits:
// --------
// hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
+// "1" :23 epoch:2 age:4 biased_lock:1 lock:2 (biased always locked object)
// size:32 ------------------------------------------>| (CMS free block)
// PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
// 64 bits:
// --------
// unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
+// "1" :54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased always locked object)
// PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
// size:64 ----------------------------------------------------->| (CMS free block)
//
// unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
// JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
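To make the field arithmetic concrete, here is a minimal standalone sketch (not HotSpot code; the shifts are hand-derived from the 64-bit normal-object row above) that packs and unpacks the fields:

#include <cassert>
#include <cstdint>

int main() {
  // Pack: hash = 0x12345, age = 6, unlocked (lock:2 == 01, biased_lock:1 == 0).
  uint64_t mark = (uint64_t(0x12345) << 8) | (uint64_t(6) << 3) | uint64_t(1);
  assert((mark & 0x3) == 1);                       // lock:2  -> unlocked
  assert(((mark >> 2) & 0x1) == 0);                // biased_lock:1 clear
  assert(((mark >> 3) & 0xF) == 6);                // age:4
  assert(((mark >> 8) & 0x7FFFFFFF) == 0x12345);   // hash:31
  return 0;
}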
@@ -94,10 +96,22 @@
// [ptr | 10] monitor inflated lock (header is swapped out)
// [ptr | 11] marked used by markSweep to mark an object
// not valid at any other time
//
// We assume that stack/thread pointers have the lowest two bits cleared.
+//
+// Always locked: since displaced-header and monitor references require
+// memory at a fixed address, and the hash code can itself be displaced,
+// the efficient way to provide a *permanent lock* is to specialize the
+// biased pattern (even when biased locking isn't enabled). Because the
+// biased_lock_alignment of the thread reference doesn't use the lowest
+// bit ("2 << thread_shift"), we can use this otherwise-illegal thread
+// pointer alignment to denote the "always locked" pattern.
+//
+// [ <unused> | larval | 1 | epoch | age | 1 | 01 ] permanently locked
+//
+// A private buffered value is always locked and can be in a larval state.
+//
class BasicLock;
class ObjectMonitor;
class JavaThread;
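The pattern arithmetic behind the new comment, as a standalone sketch; the shift names mirror the enums that follow, but the values here are recomputed local constants (64-bit LP64 layout assumed, where cms_bits = 1):

#include <cassert>
#include <cstdint>

int main() {
  const int lock_bits = 2, biased_lock_bits = 1, age_bits = 4;
  const int cms_bits = 1, epoch_bits = 2;                  // LP64
  const int age_shift    = lock_bits + biased_lock_bits;   // 3
  const int cms_shift    = age_shift + age_bits;           // 7
  const int hash_shift   = cms_shift + cms_bits;           // 8
  const int epoch_shift  = hash_shift;                     // 8
  const int thread_shift = epoch_shift + epoch_bits;       // 10
  const uint64_t biased_lock_pattern   = 5;                // 0b101
  const uint64_t always_locked_pattern =
      (uint64_t(1) << thread_shift) | biased_lock_pattern;
  assert(always_locked_pattern == 0x405);
  // JavaThread* values are aligned to (2 << thread_shift) = 0x800, so a real
  // thread pointer never has bit 10 set; bit 10 set marks "always locked".
  assert((always_locked_pattern >> thread_shift) & 1);
  return 0;
}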
@@ -112,21 +126,25 @@
lock_bits = 2,
biased_lock_bits = 1,
max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits,
cms_bits = LP64_ONLY(1) NOT_LP64(0),
- epoch_bits = 2
+ epoch_bits = 2,
+ always_locked_bits = 1,
+ larval_bits = 1
};
// The biased locking code currently requires that the age bits be
// contiguous to the lock bits.
enum { lock_shift = 0,
biased_lock_shift = lock_bits,
age_shift = lock_bits + biased_lock_bits,
cms_shift = age_shift + age_bits,
hash_shift = cms_shift + cms_bits,
- epoch_shift = hash_shift
+ epoch_shift = hash_shift,
+ thread_shift = epoch_shift + epoch_bits,
+ larval_shift = thread_shift + always_locked_bits
};
enum { lock_mask = right_n_bits(lock_bits),
lock_mask_in_place = lock_mask << lock_shift,
biased_lock_mask = right_n_bits(lock_bits + biased_lock_bits),
@@ -135,19 +153,21 @@
age_mask = right_n_bits(age_bits),
age_mask_in_place = age_mask << age_shift,
epoch_mask = right_n_bits(epoch_bits),
epoch_mask_in_place = epoch_mask << epoch_shift,
cms_mask = right_n_bits(cms_bits),
- cms_mask_in_place = cms_mask << cms_shift
+ cms_mask_in_place = cms_mask << cms_shift,
#ifndef _WIN64
- ,hash_mask = right_n_bits(hash_bits),
- hash_mask_in_place = (address_word)hash_mask << hash_shift
+ hash_mask = right_n_bits(hash_bits),
+ hash_mask_in_place = (address_word)hash_mask << hash_shift,
#endif
+ larval_mask = right_n_bits(larval_bits),
+ larval_mask_in_place = larval_mask << larval_shift
};
// Alignment of JavaThread pointers encoded in object header required by biased locking
- enum { biased_lock_alignment = 2 << (epoch_shift + epoch_bits)
+ enum { biased_lock_alignment = 2 << thread_shift
};
#ifdef _WIN64
// These values are too big for Win64
const static uintptr_t hash_mask = right_n_bits(hash_bits);
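A sketch of the mask construction, assuming right_n_bits(n) behaves as (1 << n) - 1 (as in HotSpot's globalDefinitions.hpp); it also shows why Win64 hoists the hash masks out of the enum: the shifted value no longer fits a 32-bit int.

#include <cassert>
#include <cstdint>

int main() {
  auto right_n_bits = [](int n) { return (uint64_t(1) << n) - 1; };
  const int hash_shift = 8, hash_bits = 31, larval_shift = 11;  // 64-bit layout
  const uint64_t hash_mask          = right_n_bits(hash_bits);  // 0x7fffffff
  const uint64_t hash_mask_in_place = hash_mask << hash_shift;  // 0x7fffffff00
  // Too big for an int-sized enumerator, hence the const statics on _WIN64.
  assert(hash_mask_in_place > UINT32_MAX);
  const uint64_t larval_mask_in_place = uint64_t(1) << larval_shift;
  assert(larval_mask_in_place == 0x800);
  return 0;
}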
@@ -157,11 +177,12 @@
enum { locked_value = 0,
unlocked_value = 1,
monitor_value = 2,
marked_value = 3,
- biased_lock_pattern = 5
+ biased_lock_pattern = 5,
+ always_locked_pattern = 1 << thread_shift | biased_lock_pattern
};
enum { no_hash = 0 }; // no hash value assigned
enum { no_hash_in_place = (address_word)no_hash << hash_shift,
@@ -170,10 +191,18 @@
enum { max_age = age_mask };
enum { max_bias_epoch = epoch_mask };
+ enum { larval_state_pattern = (1 << larval_shift) };
+
+ static markOop always_locked_prototype() {
+ return markOop(always_locked_pattern);
+ }
+
+ bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; }
+
// Biased Locking accessors.
// These must be checked by all code which calls into the
// ObjectSynchronizer and other code. The biasing is not understood
// by the lower-level CAS-based locking code, although the runtime
// fixes up biased locks to be compatible with it when a bias is
@@ -181,10 +210,11 @@
bool has_bias_pattern() const {
return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
}
JavaThread* biased_locker() const {
assert(has_bias_pattern(), "should not call this otherwise");
+ assert(!is_always_locked(), "invariant");
return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
}
// Indicates that the mark has the bias bit set but that it has not
// yet been biased toward a particular thread
bool is_biased_anonymously() const {
@@ -198,10 +228,11 @@
return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
}
markOop set_bias_epoch(int epoch) {
assert(has_bias_pattern(), "should not call this otherwise");
assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
+ assert(!is_always_locked(), "Rebias needs to fail");
return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
}
markOop incr_bias_epoch() {
return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
}
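Why the two new asserts are needed, as a standalone sketch: the always-locked pattern deliberately satisfies the has_bias_pattern() test, so bias-specific code must be fenced off explicitly (64-bit constants from the earlier sketch assumed):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t biased_lock_mask_in_place = 0x7;   // lock + biased_lock bits
  const uint64_t biased_lock_pattern       = 0x5;
  const uint64_t always_locked_pattern     = 0x405; // (1 << thread_shift) | 5
  // has_bias_pattern() is true for an always-locked mark...
  assert((always_locked_pattern & biased_lock_mask_in_place) == biased_lock_pattern);
  // ...but a genuinely thread-biased mark (thread aligned to 2 << thread_shift,
  // so bit 10 clear; 0x1000 stands in for a fake aligned "thread" here) never
  // passes the is_always_locked() test:
  const uint64_t thread_biased = 0x1000 | biased_lock_pattern;
  assert((thread_biased & always_locked_pattern) != always_locked_pattern);
  return 0;
}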
@@ -347,10 +378,21 @@
bool has_no_hash() const {
return hash() == no_hash;
}
+ // Private buffered value operations
+ markOop enter_larval_state() const {
+ return markOop((value() & ~larval_mask_in_place) | larval_state_pattern);
+ }
+ markOop exit_larval_state() const {
+ return markOop(value() & ~larval_mask_in_place);
+ }
+ bool is_larval_state() const {
+ return (value() & larval_mask_in_place) == larval_state_pattern;
+ }
+
// Prototype mark for initialization
static markOop prototype() {
return markOop( no_hash_in_place | no_lock_in_place );
}
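A round-trip sketch of the larval-bit operations above on a plain integer (larval_shift = 11 assumed from the 64-bit layout):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t larval_mask_in_place = uint64_t(1) << 11;  // == larval_state_pattern
  const uint64_t always_locked = 0x405;
  uint64_t mark = always_locked;
  uint64_t larval = (mark & ~larval_mask_in_place) | larval_mask_in_place; // enter_larval_state()
  assert((larval & larval_mask_in_place) == larval_mask_in_place);         // is_larval_state()
  assert((larval & ~larval_mask_in_place) == always_locked);               // exit_larval_state()
  return 0;
}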
@@ -362,11 +404,11 @@
// Prepare address of oop for placement into mark
inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
// Recover address of oop from encoded form used in mark
- inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+ inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); }
// These markOops indicate cms free chunk blocks and not objects.
// In 64 bit, the markOop is set to distinguish them from oops.
// These are defined in 32 bit mode for vmStructs.
const static uintptr_t cms_free_chunk_pattern = 0x1;
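For completeness, a sketch of the encode/decode round trip used during marking (the has_bias_pattern() early-out in decode_pointer() is not modeled): set_marked() ORs in marked_value (0b11) and clear_lock_bits() masks it back off, which works because object addresses are at least 4-byte aligned.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t p = 0x7f0000001000;                  // some aligned address
  uint64_t encoded = (p & ~uint64_t(3)) | 3;    // encode_pointer_as_mark()
  assert((encoded & 3) == 3);                   // "marked" pattern
  assert((encoded & ~uint64_t(3)) == p);        // decode_pointer()
  return 0;
}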