src/hotspot/share/oops/markOop.hpp

//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
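//
//    For illustration only (not in the original header): given these
//    encodings, a reader could classify a raw mark by masking the low
//    lock bits. This sketch assumes the companion *_value constants
//    (locked_value = 0, unlocked_value = 1, monitor_value = 2,
//    marked_value = 3) defined elsewhere in this class:
//
//      uintptr_t bits = mark.value() & markWord::lock_mask_in_place;
//      if      (bits == markWord::locked_value)   { /* stack-locked        */ }
//      else if (bits == markWord::unlocked_value) { /* neutral / unlocked  */ }
//      else if (bits == markWord::monitor_value)  { /* inflated monitor    */ }
//      else                                       { /* marked by GC        */ }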

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markWord {
 private:
  uintptr_t _value;

  // Poison - prevent casts and pointer conversions.
  // Use to_pointer and from_pointer instead.
  template<typename T> operator T();
  markWord(const volatile void*);
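
  // For illustration only (not in the original header): because these
  // declarations are private and intentionally left undefined, accidental
  // conversions fail at compile time (or at link time from within the
  // class), e.g.:
  //
  //   markWord m(0x5);
  //   void* p = m;                    // error: operator T() is inaccessible
  //   markWord w((volatile void*)p);  // error: constructor is inaccessible
  //
  // forcing callers through the explicit from_pointer/to_pointer API.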
 public:
  explicit markWord(uintptr_t value) : _value(value) {}

  markWord() { /* uninitialized */ }

  // It is critical for performance that this class be trivially
  // destructible, copyable, and assignable.

  static markWord from_pointer(void* ptr) {
    return markWord((uintptr_t)ptr);
  }
  void* to_pointer() const {
    return (void*)_value;
  }

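  // For illustration only (not in the original header): the locking code
  // round-trips pointers through the header with these helpers. A sketch,
  // assuming some ObjectMonitor* mon is at hand:
  //
  //   markWord m = markWord::from_pointer(mon);
  //   ObjectMonitor* back = (ObjectMonitor*)m.to_pointer();
  //   assert(back == mon, "conversion must be lossless");
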
  bool operator==(const markWord& other) const {
    return _value == other._value;
  }
  bool operator!=(const markWord& other) const {
    return !operator==(other);
  }

  // Conversion
  uintptr_t value() const { return _value; }

  // Constants
  static const uintptr_t zero     = 0;

  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };
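
  // For illustration only (not in the original header): on a 64-bit VM
  // (LP64, so cms_bits == 1) the shifts above evaluate to:
  //
  //   lock_shift        = 0   (bits 0..1)
  //   biased_lock_shift = 2   (bit  2)
  //   age_shift         = 3   (bits 3..6)
  //   cms_shift         = 7   (bit  7)
  //   hash_shift        = 8   (bits 8..38, since hash_bits caps at 31)
  //   epoch_shift       = 8   (the epoch overlays the hash field; a
  //                            biased object has no identity hash installed)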

  enum { lock_mask                = right_n_bits(lock_bits),


  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markWord while being inflated.
  // Code that looks at the mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stack lock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived.)
  static markWord INFLATING() { return markWord(zero); }    // inflate-in-progress

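  // For illustration only (not in the original header): the LD...CAS idiom
  // described above, sketched with hypothetical obj->mark() and
  // obj->cas_set_mark() accessors standing in for the real oopDesc API:
  //
  //   for (;;) {
  //     markWord mark = obj->mark();               // LD
  //     if (mark.is_being_inflated()) continue;    // 0 means BUSY: spin, never overwrite
  //     if (obj->cas_set_mark(new_mark, mark) == mark) break;  // CAS
  //   }
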
  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and

