--- old/src/hotspot/share/oops/markOop.hpp	2019-03-11 14:26:21.822354939 +0100
+++ new/src/hotspot/share/oops/markOop.hpp	2019-03-11 14:26:21.614354941 +0100
@@ -38,6 +38,7 @@
 //  --------
 //  hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
 //  JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
+//  "1" :23 epoch:2        age:4    biased_lock:1 lock:2 (biased always locked object)
 //  size:32 ------------------------------------------>| (CMS free block)
 //  PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
 //
@@ -45,6 +46,7 @@
 //  --------
 //  unused:25 hash:31 -->| unused:1 age:4    biased_lock:1 lock:2 (normal object)
 //  JavaThread*:54 epoch:2 unused:1 age:4    biased_lock:1 lock:2 (biased object)
+//  "1" :54 epoch:2        unused:1 age:4    biased_lock:1 lock:2 (biased always locked object)
 //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
 //  size:64 ----------------------------------------------------->| (CMS free block)
 //
@@ -96,6 +98,18 @@
 //    not valid at any other time
 //
 //    We assume that stack/thread pointers have the lowest two bits cleared.
+//
+//  Always locked: since displaced and monitor references require memory at a
+//  fixed address, and the hash code can be displaced, efficiently providing a
+//  *permanent lock* leaves us with specializing the biased pattern (even when
+//  biased locking isn't enabled). Since biased_lock_alignment for the thread
+//  reference doesn't use the lowest bit ("2 << thread_shift"), we can use
+//  this illegal thread pointer alignment to denote the "always locked" pattern.
+//
+//  [ | larval |1| epoch | age | 1 | 01]  permanently locked
+//
+//  A private buffered value is always locked and can be in a larval state.
+//
 
 class BasicLock;
 class ObjectMonitor;
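To make the encoding concrete, here is a minimal standalone sketch, assuming the 64-bit LP64 layout (BitsPerWord = 64); it is plain C++, not HotSpot source. It recomputes the shift constants that the hunks below introduce and checks the trick from the comment above: the always-locked pattern sets bit thread_shift, which every biased_lock_alignment-aligned JavaThread* must have clear, so the pattern can never collide with a real thread reference.

// Sketch only: recompute the LP64 mark-word constants from this patch.
#include <cstdint>
#include <cstdio>

enum : uint64_t {
  lock_bits          = 2,
  biased_lock_bits   = 1,
  age_bits           = 4,
  cms_bits           = 1,                              // LP64_ONLY(1)
  epoch_bits         = 2,
  always_locked_bits = 1,

  age_shift    = lock_bits + biased_lock_bits,         // 3
  cms_shift    = age_shift + age_bits,                 // 7
  hash_shift   = cms_shift + cms_bits,                 // 8
  epoch_shift  = hash_shift,                           // epoch overlays the hash
  thread_shift = epoch_shift + epoch_bits,             // 10
  larval_shift = thread_shift + always_locked_bits,    // 11

  biased_lock_pattern   = 5,                           // 0b101: biased + locked
  always_locked_pattern = (uint64_t(1) << thread_shift) | biased_lock_pattern,
  biased_lock_alignment = uint64_t(2) << thread_shift, // 2048
};

// The whole pattern lives in bits that a biased_lock_alignment-aligned
// JavaThread* must have clear, hence "illegal thread pointer alignment".
static_assert((always_locked_pattern & (biased_lock_alignment - 1)) == always_locked_pattern,
              "always-locked pattern is an impossible JavaThread* alignment");

int main() {
  printf("always_locked_pattern = 0x%llx, biased_lock_alignment = %llu\n",
         (unsigned long long)always_locked_pattern,   // 0x405
         (unsigned long long)biased_lock_alignment);  // 2048
  return 0;
}

On LP64 this yields thread_shift = 10, so always_locked_pattern = 0x405 while thread pointers are 2048-byte aligned, which is exactly the headroom the pattern exploits.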
@@ -114,7 +128,9 @@
          max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
          hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
          cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
-         epoch_bits               = 2
+         epoch_bits               = 2,
+         always_locked_bits       = 1,
+         larval_bits              = 1
   };
 
   // The biased locking code currently requires that the age bits be
@@ -124,7 +140,9 @@
          age_shift                = lock_bits + biased_lock_bits,
          cms_shift                = age_shift + age_bits,
          hash_shift               = cms_shift + cms_bits,
-         epoch_shift              = hash_shift
+         epoch_shift              = hash_shift,
+         thread_shift             = epoch_shift + epoch_bits,
+         larval_shift             = thread_shift + always_locked_bits
   };
 
   enum { lock_mask                = right_n_bits(lock_bits),
@@ -137,15 +155,17 @@
          epoch_mask               = right_n_bits(epoch_bits),
          epoch_mask_in_place      = epoch_mask << epoch_shift,
          cms_mask                 = right_n_bits(cms_bits),
-         cms_mask_in_place        = cms_mask << cms_shift
+         cms_mask_in_place        = cms_mask << cms_shift,
 #ifndef _WIN64
-         ,hash_mask               = right_n_bits(hash_bits),
-         hash_mask_in_place       = (address_word)hash_mask << hash_shift
+         hash_mask                = right_n_bits(hash_bits),
+         hash_mask_in_place       = (address_word)hash_mask << hash_shift,
 #endif
+         larval_mask              = right_n_bits(larval_bits),
+         larval_mask_in_place     = larval_mask << larval_shift
   };
 
   // Alignment of JavaThread pointers encoded in object header required by biased locking
-  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
+  enum { biased_lock_alignment    = 2 << thread_shift
   };
 
 #ifdef _WIN64
@@ -159,7 +179,8 @@
          unlocked_value           = 1,
          monitor_value            = 2,
          marked_value             = 3,
-         biased_lock_pattern      = 5
+         biased_lock_pattern      = 5,
+         always_locked_pattern    = 1 << thread_shift | biased_lock_pattern
   };
 
   enum { no_hash                  = 0 };  // no hash value assigned
@@ -172,6 +193,14 @@
 
   enum { max_bias_epoch           = epoch_mask };
 
+  enum { larval_state_pattern     = (1 << larval_shift) };
+
+  static markOop always_locked_prototype() {
+    return markOop(always_locked_pattern);
+  }
+
+  bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; }
+
   // Biased Locking accessors.
   // These must be checked by all code which calls into the
   // ObjectSynchronizer and other code. The biasing is not understood
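As a hedged illustration of the predicate just defined (standalone sketch; a plain uint64_t stands in for markOop, and the constants are the LP64 values derived earlier), the following shows that is_always_locked() implies has_bias_pattern() but not the converse, which is what lets the asserts in the next hunks keep revocation and rebiasing from ever touching an always-locked mark:

// Sketch only: the subset relation between biased and always-locked marks.
#include <cstdint>

constexpr uint64_t mask_bits(uint64_t x, uint64_t m) { return x & m; }

constexpr uint64_t biased_lock_mask      = 7;       // biased_lock:1 lock:2
constexpr uint64_t biased_lock_pattern   = 5;       // 0b101
constexpr uint64_t thread_shift          = 10;      // LP64, derived above
constexpr uint64_t always_locked_pattern = (1ULL << thread_shift) | biased_lock_pattern;

constexpr bool has_bias_pattern(uint64_t mark) {
  return mask_bits(mark, biased_lock_mask) == biased_lock_pattern;
}
constexpr bool is_always_locked(uint64_t mark) {
  return mask_bits(mark, always_locked_pattern) == always_locked_pattern;
}

// A mark biased toward a hypothetical 2048-byte-aligned JavaThread*:
constexpr uint64_t thread_biased_mark = 0x7f0000001800ULL | biased_lock_pattern;

static_assert(has_bias_pattern(thread_biased_mark) && !is_always_locked(thread_biased_mark),
              "an ordinary biased mark stays revocable");
static_assert(has_bias_pattern(always_locked_pattern) && is_always_locked(always_locked_pattern),
              "always-locked marks are a strict subset of biased marks");

Because every always-locked mark also satisfies has_bias_pattern(), existing synchronization code simply treats such objects as locked, while the new asserts fence off the paths that would reinterpret the pattern as a thread pointer.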
@@ -183,6 +212,7 @@
   }
   JavaThread* biased_locker() const {
     assert(has_bias_pattern(), "should not call this otherwise");
+    assert(!is_always_locked(), "invariant");
     return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
   }
   // Indicates that the mark has the bias bit set but that it has not
@@ -200,6 +230,7 @@
   markOop set_bias_epoch(int epoch) {
     assert(has_bias_pattern(), "should not call this otherwise");
     assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
+    assert(!is_always_locked(), "Rebias needs to fail");
     return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
   }
   markOop incr_bias_epoch() {
@@ -349,6 +380,17 @@
     return hash() == no_hash;
   }
 
+  // private buffered value operations
+  markOop enter_larval_state() const {
+    return markOop((value() & ~larval_mask_in_place) | larval_state_pattern);
+  }
+  markOop exit_larval_state() const {
+    return markOop(value() & ~larval_mask_in_place);
+  }
+  bool is_larval_state() const {
+    return (value() & larval_mask_in_place) == larval_state_pattern;
+  }
+
   // Prototype mark for initialization
   static markOop prototype() {
     return markOop( no_hash_in_place | no_lock_in_place );
@@ -364,7 +406,7 @@
   inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
 
   // Recover address of oop from encoded form used in mark
-  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+  inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); }
 
   // These markOops indicate cms free chunk blocks and not objects.
   // In 64 bit, the markOop is set to distinguish them from oops.
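Finally, a minimal sketch of the larval round trip under the same assumptions (uint64_t in place of markOop; helper names mirror the patch): a private buffered value starts from always_locked_prototype(), is flipped into the larval state while its fields are being written, and drops back to the plain always-locked pattern once construction finishes.

// Sketch only: larval state transitions on an always-locked mark.
#include <cassert>
#include <cstdint>

constexpr uint64_t thread_shift          = 10;               // LP64, derived above
constexpr uint64_t larval_shift          = 11;
constexpr uint64_t always_locked_pattern = (1ULL << thread_shift) | 5;
constexpr uint64_t larval_mask_in_place  = 1ULL << larval_shift;
constexpr uint64_t larval_state_pattern  = 1ULL << larval_shift;

constexpr uint64_t enter_larval_state(uint64_t m) {
  return (m & ~larval_mask_in_place) | larval_state_pattern;
}
constexpr uint64_t exit_larval_state(uint64_t m) {
  return m & ~larval_mask_in_place;
}
constexpr bool is_larval_state(uint64_t m) {
  return (m & larval_mask_in_place) == larval_state_pattern;
}

int main() {
  uint64_t m = always_locked_pattern;   // always_locked_prototype()
  assert(!is_larval_state(m));
  m = enter_larval_state(m);            // value object under construction
  assert(is_larval_state(m));
  m = exit_larval_state(m);             // construction done
  assert(m == always_locked_pattern);   // back to the plain prototype
  return 0;
}

Note that enter_larval_state() and exit_larval_state() touch only larval_mask_in_place, so the always-locked bits survive the transition unchanged; the larval bit itself is only meaningful once bit thread_shift already marks the word as a non-thread pattern.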