
src/hotspot/share/oops/markOop.hpp


*** 36,52 **** --- 36,54 ----
  //
  //  32 bits:
  //  --------
  //             hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
  //             JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
+ //             "1" :23 epoch:2 age:4 biased_lock:1 lock:2 (biased always locked object)
  //             size:32 ------------------------------------------>| (CMS free block)
  //             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
  //
  //  64 bits:
  //  --------
  //  unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
  //  JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
+ //  "1" :54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased always locked object)
  //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
  //  size:64 ----------------------------------------------------->| (CMS free block)
  //
  //  unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
  //  JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
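As a sanity check on the 64-bit "normal object" row above, here is a minimal standalone sketch (plain C++, not HotSpot code) that packs and unpacks the fields at the positions the table describes. The literal shifts 8 and 3 and the masks are hand-derived from the LP64 layout and are illustrative assumptions, not code from the patch.

  #include <cstdint>
  #include <cassert>

  int main() {
    const uint64_t hash = 0x12345678;  // any 31-bit identity hash
    const uint64_t age  = 3;           // 4-bit GC age
    // hash:31 at bit 8, age:4 at bit 3, biased_lock:1 at bit 2, lock:2 at bit 0
    uint64_t header = (hash << 8) | (age << 3) | (0u << 2) | 1;  // unlocked
    assert(((header >> 8) & 0x7FFFFFFF) == hash);
    assert(((header >> 3) & 0xF)        == age);
    assert((header & 0x3)               == 1);  // unlocked_value
    return 0;
  }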
*** 94,103 **** --- 96,117 ----
  //    [ptr             | 10]  monitor            inflated lock (header is swapped out)
  //    [ptr             | 11]  marked             used by markSweep to mark an object
  //                                               not valid at any other time
  //
  //    We assume that stack/thread pointers have the lowest two bits cleared.
+ //
+ //  Always locked: since displaced and monitor references require memory at a
+ //  fixed address, and the hash code can be displaced, efficiently providing a
+ //  *permanent lock* leaves us with specializing the biased pattern (even when
+ //  biased locking isn't enabled). Since biased_lock_alignment for the thread
+ //  reference doesn't use the lowest bit ("2 << thread_shift"), we can use
+ //  this illegal thread pointer alignment to denote the "always locked" pattern.
+ //
+ //    [ <unused> | larval |1| epoch | age | 1 | 01]  permanently locked
+ //
+ //    A private buffered value is always locked and can be in a larval state.
+ //
  class BasicLock;
  class ObjectMonitor;
  class JavaThread;
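The alignment trick in the new comment can be checked in isolation. A minimal sketch, assuming the LP64 value thread_shift == 10 (derived in the next hunk): since every encoded JavaThread* is a multiple of biased_lock_alignment, bit thread_shift of a real thread encoding is always zero, so a "1" there can never collide with an actual thread.

  #include <cstdint>
  #include <cassert>

  int main() {
    const unsigned  thread_shift          = 10;                  // LP64 assumption
    const uintptr_t biased_lock_alignment = 2u << thread_shift;  // 2048
    // Any encoded thread pointer is a multiple of 2048, so bit 10 is clear...
    uintptr_t thread_bits = 7 * biased_lock_alignment;           // hypothetical thread
    assert((thread_bits & (uintptr_t(1) << thread_shift)) == 0);
    // ...which makes a 1 in that position an impossible thread encoding.
    return 0;
  }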
*** 112,132 ****
        lock_bits                = 2,
        biased_lock_bits         = 1,
        max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
        hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
        cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
!       epoch_bits               = 2
    };

    // The biased locking code currently requires that the age bits be
    // contiguous to the lock bits.
    enum { lock_shift            = 0,
           biased_lock_shift     = lock_bits,
           age_shift             = lock_bits + biased_lock_bits,
           cms_shift             = age_shift + age_bits,
           hash_shift            = cms_shift + cms_bits,
!          epoch_shift           = hash_shift
    };

    enum { lock_mask             = right_n_bits(lock_bits),
           lock_mask_in_place    = lock_mask << lock_shift,
           biased_lock_mask      = right_n_bits(lock_bits + biased_lock_bits),
--- 126,150 ----
        lock_bits                = 2,
        biased_lock_bits         = 1,
        max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
        hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
        cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
!       epoch_bits               = 2,
!       always_locked_bits       = 1,
!       larval_bits              = 1
    };

    // The biased locking code currently requires that the age bits be
    // contiguous to the lock bits.
    enum { lock_shift            = 0,
           biased_lock_shift     = lock_bits,
           age_shift             = lock_bits + biased_lock_bits,
           cms_shift             = age_shift + age_bits,
           hash_shift            = cms_shift + cms_bits,
!          epoch_shift           = hash_shift,
!          thread_shift          = epoch_shift + epoch_bits,
!          larval_shift          = thread_shift + always_locked_bits
    };

    enum { lock_mask             = right_n_bits(lock_bits),
           lock_mask_in_place    = lock_mask << lock_shift,
           biased_lock_mask      = right_n_bits(lock_bits + biased_lock_bits),
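A standalone sketch (assuming LP64, so cms_bits == 1) reproducing the shift arithmetic above, to confirm where the two new fields land; all names mirror the enum, the static_asserts and main are only there to make the sketch compile and check itself.

  enum {
    age_bits           = 4,
    lock_bits          = 2,
    biased_lock_bits   = 1,
    cms_bits           = 1,   // LP64_ONLY(1)
    epoch_bits         = 2,
    always_locked_bits = 1,
    larval_bits        = 1
  };

  enum {
    lock_shift         = 0,
    biased_lock_shift  = lock_bits,                        // 2
    age_shift          = lock_bits + biased_lock_bits,     // 3
    cms_shift          = age_shift + age_bits,             // 7
    hash_shift         = cms_shift + cms_bits,             // 8
    epoch_shift        = hash_shift,                       // 8 (epoch overlays hash)
    thread_shift       = epoch_shift + epoch_bits,         // 10
    larval_shift       = thread_shift + always_locked_bits // 11
  };

  static_assert(thread_shift == 10, "JavaThread* field starts above the epoch");
  static_assert(larval_shift == 11, "larval bit sits just above the always-locked bit");

  int main() { return 0; }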
*** 135,153 ****
           age_mask              = right_n_bits(age_bits),
           age_mask_in_place     = age_mask << age_shift,
           epoch_mask            = right_n_bits(epoch_bits),
           epoch_mask_in_place   = epoch_mask << epoch_shift,
           cms_mask              = right_n_bits(cms_bits),
!          cms_mask_in_place     = cms_mask << cms_shift
  #ifndef _WIN64
!          ,hash_mask            = right_n_bits(hash_bits),
!          hash_mask_in_place    = (address_word)hash_mask << hash_shift
  #endif
    };

    // Alignment of JavaThread pointers encoded in object header required by biased locking
!   enum { biased_lock_alignment = 2 << (epoch_shift + epoch_bits) };

  #ifdef _WIN64
    // These values are too big for Win64
    const static uintptr_t hash_mask = right_n_bits(hash_bits);
--- 153,173 ----
           age_mask              = right_n_bits(age_bits),
           age_mask_in_place     = age_mask << age_shift,
           epoch_mask            = right_n_bits(epoch_bits),
           epoch_mask_in_place   = epoch_mask << epoch_shift,
           cms_mask              = right_n_bits(cms_bits),
!          cms_mask_in_place     = cms_mask << cms_shift,
  #ifndef _WIN64
!          hash_mask             = right_n_bits(hash_bits),
!          hash_mask_in_place    = (address_word)hash_mask << hash_shift,
  #endif
+          larval_mask           = right_n_bits(larval_bits),
+          larval_mask_in_place  = larval_mask << larval_shift
    };

    // Alignment of JavaThread pointers encoded in object header required by biased locking
!   enum { biased_lock_alignment = 2 << thread_shift };

  #ifdef _WIN64
    // These values are too big for Win64
    const static uintptr_t hash_mask = right_n_bits(hash_bits);
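A quick numeric check of the two derived constants touched here, using the LP64 shifts from the previous hunk (thread_shift == 10, larval_shift == 11). Note that 2 << (epoch_shift + epoch_bits) and 2 << thread_shift are the same value by definition, so the rewritten biased_lock_alignment is a pure simplification.

  static_assert((1 << 11) == 0x800, "larval_mask_in_place");
  static_assert((2 << 10) == 0x800, "biased_lock_alignment");
  // Both evaluate to 0x800: the larval bit occupies what would be the lowest
  // bit of an aligned JavaThread*, which is safe because an always-locked
  // header never carries a real thread pointer.

  int main() { return 0; }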
*** 157,167 ****
    enum { locked_value             = 0,
           unlocked_value           = 1,
           monitor_value            = 2,
           marked_value             = 3,
!          biased_lock_pattern      = 5
    };

    enum { no_hash                  = 0 };  // no hash value assigned

    enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
--- 177,188 ----
    enum { locked_value             = 0,
           unlocked_value           = 1,
           monitor_value            = 2,
           marked_value             = 3,
!          biased_lock_pattern      = 5,
!          always_locked_pattern    = 1 << thread_shift | biased_lock_pattern
    };

    enum { no_hash                  = 0 };  // no hash value assigned

    enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
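Since << binds tighter than |, the new pattern is (1 << thread_shift) | biased_lock_pattern. A hedged sketch of its concrete LP64 value:

  #include <cassert>

  int main() {
    const unsigned biased_lock_pattern   = 5;   // 0b101: bias bit set, lock bits 01
    const unsigned thread_shift          = 10;  // LP64 assumption
    const unsigned always_locked_pattern = (1u << thread_shift) | biased_lock_pattern;

    // 0b100'0000'0101: the thread field holds the illegal value 1, which no
    // real JavaThread* can alias given biased_lock_alignment == 2048.
    assert(always_locked_pattern == 0x405);
    return 0;
  }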
*** 170,179 **** --- 191,208 ----
    enum { max_age                  = age_mask };

    enum { max_bias_epoch           = epoch_mask };

+   enum { larval_state_pattern     = (1 << larval_shift) };
+ 
+   static markOop always_locked_prototype() {
+     return markOop(always_locked_pattern);
+   }
+ 
+   bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; }
+ 
    // Biased Locking accessors.
    // These must be checked by all code which calls into the
    // ObjectSynchronizer and other code. The biasing is not understood
    // by the lower-level CAS-based locking code, although the runtime
    // fixes up biased locks to be compatible with it when a bias is
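A sketch of what the is_always_locked() test does: mask_bits(x, m) in HotSpot is simply (x & m), so the check keeps only the pattern bits and compares. The header value and age bits below are made up for illustration.

  #include <cstdint>
  #include <cassert>

  static uintptr_t mask_bits(uintptr_t x, uintptr_t m) { return x & m; }

  int main() {
    const uintptr_t always_locked_pattern = 0x405;  // (1 << 10) | 5, LP64
    uintptr_t header = always_locked_pattern | (uintptr_t(7) << 3);  // e.g. age = 7
    // Other fields (age here) do not disturb the pattern test.
    assert(mask_bits(header, always_locked_pattern) == always_locked_pattern);
    return 0;
  }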
*** 181,190 **** --- 210,220 ----
    bool has_bias_pattern() const {
      return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
    }
    JavaThread* biased_locker() const {
      assert(has_bias_pattern(), "should not call this otherwise");
+     assert(!is_always_locked(), "invariant");
      return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
    }
    // Indicates that the mark has the bias bit set but that it has not
    // yet been biased toward a particular thread
    bool is_biased_anonymously() const {
*** 198,207 **** --- 228,238 ----
      return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
    }
    markOop set_bias_epoch(int epoch) {
      assert(has_bias_pattern(), "should not call this otherwise");
      assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
+     assert(!is_always_locked(), "Rebias needs to fail");
      return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
    }
    markOop incr_bias_epoch() {
      return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
    }
*** 347,356 **** --- 378,398 ----

    bool has_no_hash() const {
      return hash() == no_hash;
    }

+   // private buffered value operations
+   markOop enter_larval_state() const {
+     return markOop((value() & ~larval_mask_in_place) | larval_state_pattern);
+   }
+   markOop exit_larval_state() const {
+     return markOop(value() & ~larval_mask_in_place);
+   }
+   bool is_larval_state() const {
+     return (value() & larval_mask_in_place) == larval_state_pattern;
+   }
+ 
    // Prototype mark for initialization
    static markOop prototype() {
      return markOop( no_hash_in_place | no_lock_in_place );
    }
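A sketch of the larval round trip on a plain word, assuming the LP64 larval_shift of 11 derived earlier; it mirrors enter_larval_state(), is_larval_state(), and exit_larval_state() on a raw uintptr_t rather than a markOop.

  #include <cstdint>
  #include <cassert>

  int main() {
    const uintptr_t larval_shift         = 11;                  // LP64 assumption
    const uintptr_t larval_mask_in_place = uintptr_t(1) << larval_shift;
    const uintptr_t always_locked        = 0x405;               // always_locked_pattern

    uintptr_t m = always_locked;
    uintptr_t larval = (m & ~larval_mask_in_place) | larval_mask_in_place;  // enter
    assert((larval & larval_mask_in_place) != 0);                           // is_larval
    uintptr_t done = larval & ~larval_mask_in_place;                        // exit
    assert(done == always_locked);  // everything else in the header survives
    return 0;
  }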
*** 362,372 ****
    // Prepare address of oop for placement into mark
    inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

    // Recover address of oop from encoded form used in mark
!   inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }

    // These markOops indicate cms free chunk blocks and not objects.
    // In 64 bit, the markOop is set to distinguish them from oops.
    // These are defined in 32 bit mode for vmStructs.
    const static uintptr_t cms_free_chunk_pattern  = 0x1;
--- 404,414 ----
    // Prepare address of oop for placement into mark
    inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

    // Recover address of oop from encoded form used in mark
!   inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); }

    // These markOops indicate cms free chunk blocks and not objects.
    // In 64 bit, the markOop is set to distinguish them from oops.
    // These are defined in 32 bit mode for vmStructs.
    const static uintptr_t cms_free_chunk_pattern  = 0x1;
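The UseBiasedLocking guard is dropped here because, per the comment added earlier, an always-locked header carries the bias pattern even when biased locking is disabled, and must never be decoded as a pointer. A simplified standalone mirror of the patched behavior, with the masks hard-coded as assumptions (biased_lock_mask_in_place == 0x7, lock_mask_in_place == 0x3):

  #include <cstdint>

  static bool has_bias_pattern(uintptr_t v) { return (v & 0x7) == 0x5; }

  // Mirrors the patched decode_pointer(): bail out on any bias pattern,
  // with or without UseBiasedLocking; otherwise strip the lock bits that
  // encode_pointer_as_mark() set.
  void* decode_pointer(uintptr_t v) {
    if (has_bias_pattern(v)) return nullptr;
    return (void*)(v & ~uintptr_t(0x3));  // clear_lock_bits()
  }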