
src/hotspot/share/oops/markOop.hpp

  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_MARKOOP_HPP
  26 #define SHARE_OOPS_MARKOOP_HPP
  27 
  28 #include "oops/oop.hpp"
  29 
  30 // The markOop describes the header of an object.
  31 //
  32 // Note that the mark is not a real oop but just a word.
  33 // It is placed in the oop hierarchy for historical reasons.
  34 //
  35 // Bit-format of an object header (most significant first, big endian layout below):
  36 //
  37 //  32 bits:
  38 //  --------
  39 //             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
  40 //             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
  41 //             "1"        :23 epoch:2 age:4    biased_lock:1 lock:2 (biased always locked object)
  42 //             size:32 ------------------------------------------>| (CMS free block)
  43 //             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
  44 //
  45 //  64 bits:
  46 //  --------
  47 //  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
  48 //  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
  49 //  "1"        :54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased always locked object)
  50 //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
  51 //  size:64 ----------------------------------------------------->| (CMS free block)
  52 //
  53 //  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
  54 //  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
  55 //  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
  56 //  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
  57 //
  58 //  - hash contains the identity hash value: largest value is
  59 //    31 bits, see os::random().  Also, 64-bit VMs require
  60 //    a hash value no bigger than 32 bits because they will not
  61 //    properly generate a mask larger than that: see library_call.cpp
  62 //    and c1_CodePatterns_sparc.cpp.
  63 //
  64 //  - the biased lock pattern is used to bias a lock toward a given
  65 //    thread. When this pattern is set in the low three bits, the lock
  66 //    is either biased toward a given thread or "anonymously" biased,
  67 //    indicating that it is possible for it to be biased. When the
  68 //    lock is biased toward a given thread, locking and unlocking can
  69 //    be performed by that thread without using atomic operations.


  81 //    assigned to all biased objects, because they tended to consume a
  82 //    significant fraction of the eden semispaces and were not
  83 //    promoted promptly, causing an increase in the amount of copying
  84 //    performed. The runtime system aligns all JavaThread* pointers to
  85 //    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
  86 //    to make room for the age bits & the epoch bits (used in support of
  87 //    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
  88 //
  89 //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
  90 //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
  91 //
  92 //  - the two lock bits are used to describe three states: locked/unlocked and monitor.
  93 //
  94 //    [ptr             | 00]  locked             ptr points to real header on stack
  95 //    [header      | 0 | 01]  unlocked           regular object header
  96 //    [ptr             | 10]  monitor            inflated lock (header is swapped out)
  97 //    [ptr             | 11]  marked             used by markSweep to mark an object
  98 //                                               not valid at any other time
  99 //
 100 //    We assume that stack/thread pointers have the lowest two bits cleared.
 101 //
 102 //    Always locked: since displaced and monitor references require memory at a
 103 //    fixed address, and the hash code can be displaced, efficiently providing a
 104 //    *permanent lock* leaves us with specializing the biased pattern (even when
 105 //    biased locking isn't enabled). Since biased_lock_alignment for the thread
 106 //    reference doesn't use the lowest bit ("2 << thread_shift"), we can use
 107 //    this illegal thread pointer alignment to denote the "always locked" pattern.
 108 //
 109 //    [ <unused> | larval |1| epoch | age | 1 | 01]       permanently locked
 110 //
 111 //    A private buffered value is always locked and can be in a larval state.
 112 //
 113 
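For illustration, here is a minimal standalone sketch (not part of this header, and not HotSpot code) of how the low bits described above distinguish the lock states. The concrete bit positions (biased pattern 101 in the low three bits, the "always locked" bit at position 10) are assumptions derived from the LP64 layout in the tables.

  #include <cstdint>
  #include <cstdio>

  // Interpret the low bits of a 64-bit mark word per the layout above.
  // The constants here are re-derived by hand and are illustrative only.
  static const char* lock_state(uint64_t mark) {
    if ((mark & 0x7) == 0x5) {                       // biased_lock:1 lock:2 == 101
      // Bit 10 (the illegal thread-pointer alignment bit) would mark the
      // header as permanently locked, per the "always locked" note above.
      return (mark & (UINT64_C(1) << 10)) ? "always locked" : "biased";
    }
    switch (mark & 0x3) {                            // lock:2
      case 0x0: return "stack-locked (ptr to displaced header)";
      case 0x1: return "unlocked (regular header)";
      case 0x2: return "inflated (ptr to ObjectMonitor)";
      default:  return "GC-marked";
    }
  }

  int main() {
    printf("%s\n", lock_state(UINT64_C(0x1)));       // unlocked prototype
    printf("%s\n", lock_state(UINT64_C(0x5)));       // anonymously biased
    printf("%s\n", lock_state(UINT64_C(0x405)));     // 1 << 10 | 101: always locked
    return 0;
  }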
 114 class BasicLock;
 115 class ObjectMonitor;
 116 class JavaThread;
 117 
 118 class markOopDesc: public oopDesc {
 119  private:
 120   // Conversion
 121   uintptr_t value() const { return (uintptr_t) this; }
 122 
 123  public:
 124   // Constants
 125   enum { age_bits                 = 4,
 126          lock_bits                = 2,
 127          biased_lock_bits         = 1,
 128          max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
 129          hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
 130          cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
 131          epoch_bits               = 2,
 132          always_locked_bits       = 1,
 133          larval_bits              = 1
 134   };
 135 
 136   // The biased locking code currently requires that the age bits be
 137   // contiguous to the lock bits.
 138   enum { lock_shift               = 0,
 139          biased_lock_shift        = lock_bits,
 140          age_shift                = lock_bits + biased_lock_bits,
 141          cms_shift                = age_shift + age_bits,
 142          hash_shift               = cms_shift + cms_bits,
 143          epoch_shift              = hash_shift,
 144          thread_shift             = epoch_shift + epoch_bits,
 145          larval_shift             = thread_shift + always_locked_bits
 146   };
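As a hedged aside, the shift chain above works out as follows on a 64-bit (LP64) build, where cms_bits is 1. The standalone sketch below only re-derives the positions; it is not HotSpot code.

  #include <cstdio>

  int main() {
    const int lock_bits = 2, biased_lock_bits = 1, age_bits = 4;
    const int cms_bits = 1, epoch_bits = 2, always_locked_bits = 1;  // LP64 assumption
    const int lock_shift        = 0;
    const int biased_lock_shift = lock_bits;                         // 2
    const int age_shift         = lock_bits + biased_lock_bits;      // 3
    const int cms_shift         = age_shift + age_bits;              // 7
    const int hash_shift        = cms_shift + cms_bits;              // 8
    const int epoch_shift       = hash_shift;                        // 8 (epoch overlays hash)
    const int thread_shift      = epoch_shift + epoch_bits;          // 10
    const int larval_shift      = thread_shift + always_locked_bits; // 11
    printf("lock=%d biased=%d age=%d cms=%d hash=%d epoch=%d thread=%d larval=%d\n",
           lock_shift, biased_lock_shift, age_shift, cms_shift,
           hash_shift, epoch_shift, thread_shift, larval_shift);
    return 0;
  }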
 147 
 148   enum { lock_mask                = right_n_bits(lock_bits),
 149          lock_mask_in_place       = lock_mask << lock_shift,
 150          biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
 151          biased_lock_mask_in_place= biased_lock_mask << lock_shift,
 152          biased_lock_bit_in_place = 1 << biased_lock_shift,
 153          age_mask                 = right_n_bits(age_bits),
 154          age_mask_in_place        = age_mask << age_shift,
 155          epoch_mask               = right_n_bits(epoch_bits),
 156          epoch_mask_in_place      = epoch_mask << epoch_shift,
 157          cms_mask                 = right_n_bits(cms_bits),
 158          cms_mask_in_place        = cms_mask << cms_shift,
 159 #ifndef _WIN64
 160          hash_mask                = right_n_bits(hash_bits),
 161          hash_mask_in_place       = (address_word)hash_mask << hash_shift,
 162 #endif
 163          larval_mask              = right_n_bits(larval_bits),
 164          larval_mask_in_place     = larval_mask << larval_shift
 165   };
 166 
 167   // Alignment of JavaThread pointers encoded in object header required by biased locking
 168   enum { biased_lock_alignment    = 2 << thread_shift
 169   };
 170 
 171 #ifdef _WIN64
 172     // These values are too big for Win64
 173     const static uintptr_t hash_mask = right_n_bits(hash_bits);
 174     const static uintptr_t hash_mask_in_place  =
 175                             (address_word)hash_mask << hash_shift;
 176 #endif
 177 
 178   enum { locked_value             = 0,
 179          unlocked_value           = 1,
 180          monitor_value            = 2,
 181          marked_value             = 3,
 182          biased_lock_pattern      = 5,
 183          always_locked_pattern    = 1 << thread_shift | biased_lock_pattern
 184   };
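With those (assumed LP64) shifts, biased_lock_pattern and always_locked_pattern come out as the concrete values below; again a standalone sketch, not HotSpot code.

  #include <cstdio>

  int main() {
    const int thread_shift          = 10;                     // LP64 assumption
    const int biased_lock_pattern   = 5;                      // 0b101
    const int always_locked_pattern = (1 << thread_shift) | biased_lock_pattern;
    printf("biased = 0x%x, always_locked = 0x%x\n",
           biased_lock_pattern, always_locked_pattern);        // 0x5, 0x405
    return 0;
  }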
 185 
 186   enum { no_hash                  = 0 };  // no hash value assigned
 187 
 188   enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
 189          no_lock_in_place         = unlocked_value
 190   };
 191 
 192   enum { max_age                  = age_mask };
 193 
 194   enum { max_bias_epoch           = epoch_mask };
 195 
 196   enum { larval_state_pattern     = (1 << larval_shift) };
 197 
 198   static markOop always_locked_prototype() {
 199     return markOop(always_locked_pattern);
 200   }
 201 
 202   bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; }
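is_always_locked() requires every bit of always_locked_pattern to be present, so an ordinary biased header does not match. A minimal sketch of that check, assuming the LP64 value 0x405 derived above:

  #include <cstdint>
  #include <cstdio>

  // Re-creation of the is_always_locked() test for illustration only.
  static bool is_always_locked(uint64_t mark) {
    const uint64_t pattern = 0x405;              // assumed LP64 always_locked_pattern
    return (mark & pattern) == pattern;
  }

  int main() {
    printf("%d\n", is_always_locked(0x405));     // 1: permanently locked prototype
    printf("%d\n", is_always_locked(0x005));     // 0: ordinary (anonymous) bias
    printf("%d\n", is_always_locked(0xc05));     // 1: still matches with the larval bit set
    return 0;
  }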
 203 
 204   // Biased Locking accessors.
 205   // These must be checked by all code which calls into the
 206   // ObjectSynchronizer and other code. The biasing is not understood
 207   // by the lower-level CAS-based locking code, although the runtime
 208   // fixes up biased locks to be compatible with it when a bias is
 209   // revoked.
 210   bool has_bias_pattern() const {
 211     return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
 212   }
 213   JavaThread* biased_locker() const {
 214     assert(has_bias_pattern(), "should not call this otherwise");
 215     assert(!is_always_locked(), "invariant");
 216     return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
 217   }
 218   // Indicates that the mark has the bias bit set but that it has not
 219   // yet been biased toward a particular thread
 220   bool is_biased_anonymously() const {
 221     return (has_bias_pattern() && (biased_locker() == NULL));
 222   }
 223   // Indicates epoch in which this bias was acquired. If the epoch
 224   // changes due to too many bias revocations occurring, the biases
 225   // from the previous epochs are all considered invalid.
 226   int bias_epoch() const {
 227     assert(has_bias_pattern(), "should not call this otherwise");
 228     return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
 229   }
 230   markOop set_bias_epoch(int epoch) {
 231     assert(has_bias_pattern(), "should not call this otherwise");
 232     assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
 233     assert(!is_always_locked(), "Rebias needs to fail");
 234     return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
 235   }
 236   markOop incr_bias_epoch() {
 237     return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
 238   }
 239   // Prototype mark for initialization
 240   static markOop biased_locking_prototype() {
 241     return markOop( biased_lock_pattern );
 242   }
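The epoch arithmetic used by incr_bias_epoch() simply wraps modulo epoch_mask; a tiny standalone sketch (epoch_bits == 2, so the epoch cycles 0, 1, 2, 3, 0, ...):

  #include <cstdio>

  int main() {
    const int epoch_mask = 0x3;                  // right_n_bits(epoch_bits) for epoch_bits == 2
    for (int e = 0; e <= 3; e++) {
      printf("epoch %d -> %d\n", e, (e + 1) & epoch_mask);   // 3 wraps back to 0
    }
    return 0;
  }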
 243 
 244   // lock accessors (note that these assume lock_shift == 0)
 245   bool is_locked()   const {
 246     return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
 247   }
 248   bool is_unlocked() const {
 249     return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
 250   }
 251   bool is_marked()   const {
 252     return (mask_bits(value(), lock_mask_in_place) == marked_value);
 253   }
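Note that is_unlocked() masks the three biased-lock bits rather than just the two lock bits, so a biased header (low bits 101) is reported as neither locked nor unlocked by these accessors. A standalone sketch of that distinction, assuming the bit values above:

  #include <cstdint>
  #include <cstdio>

  // Illustrative re-creation of the two accessors; not HotSpot code.
  static bool is_unlocked(uint64_t mark) { return (mark & 0x7) == 0x1; } // biased_lock_mask
  static bool is_locked(uint64_t mark)   { return (mark & 0x3) != 0x1; } // lock_mask

  int main() {
    printf("plain header 0x1:  unlocked=%d locked=%d\n", is_unlocked(0x1), is_locked(0x1)); // 1 0
    printf("biased header 0x5: unlocked=%d locked=%d\n", is_unlocked(0x5), is_locked(0x5)); // 0 0
    return 0;
  }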


 363   // age operations
 364   markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
 365   markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
 366 
 367   uint    age()               const { return mask_bits(value() >> age_shift, age_mask); }
 368   markOop set_age(uint v) const {
 369     assert((v & ~age_mask) == 0, "shouldn't overflow age field");
 370     return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
 371   }
 372   markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
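set_age()/incr_age() clear the four age bits and or in the new value, saturating at max_age. A standalone sketch, assuming age_shift == 3 as derived above:

  #include <cstdint>
  #include <cstdio>

  static const int      age_shift = 3;                 // assumed layout
  static const uint64_t age_mask  = 0xF;
  static const uint64_t age_mask_in_place = age_mask << age_shift;

  static unsigned age(uint64_t m) { return (unsigned)((m >> age_shift) & age_mask); }
  static uint64_t set_age(uint64_t m, unsigned v) {
    return (m & ~age_mask_in_place) | ((uint64_t)(v & age_mask) << age_shift);
  }
  static uint64_t incr_age(uint64_t m) { return age(m) == age_mask ? m : set_age(m, age(m) + 1); }

  int main() {
    uint64_t m = 0x1;                                  // unlocked prototype, age 0
    for (int i = 0; i < 20; i++) m = incr_age(m);      // saturates at max_age
    printf("age=%u mark=%#llx\n", age(m), (unsigned long long)m);  // age=15 mark=0x79
    return 0;
  }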
 373 
 374   // hash operations
 375   intptr_t hash() const {
 376     return mask_bits(value() >> hash_shift, hash_mask);
 377   }
 378 
 379   bool has_no_hash() const {
 380     return hash() == no_hash;
 381   }
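hash() just shifts the hash field down and masks it; with the assumed LP64 positions (hash_shift == 8, hash_mask == 0x7FFFFFFF) a standalone round trip looks like this:

  #include <cstdint>
  #include <cstdio>

  static uint64_t hash_of(uint64_t mark) { return (mark >> 8) & 0x7FFFFFFF; }  // assumed layout

  int main() {
    uint64_t unhashed = 0x1;                                  // unlocked, hash field zero
    uint64_t hashed   = (UINT64_C(0x1234abcd) << 8) | 0x1;    // unlocked, hash installed
    printf("no_hash=%d hash=%#llx\n",
           hash_of(unhashed) == 0, (unsigned long long)hash_of(hashed));
    return 0;
  }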
 382 
 383   // private buffered value operations
 384   markOop enter_larval_state() const {
 385     return markOop((value() & ~larval_mask_in_place) | larval_state_pattern);
 386   }
 387   markOop exit_larval_state() const {
 388     return markOop(value() & ~larval_mask_in_place);
 389   }
 390   bool is_larval_state() const {
 391     return (value() & larval_mask_in_place) == larval_state_pattern;
 392   }
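The larval-state operations only touch the single larval bit (bit 11 under the assumed LP64 layout); the rest of the header, including the always-locked pattern, is preserved. A standalone sketch:

  #include <cstdint>
  #include <cstdio>

  static const uint64_t larval_bit = UINT64_C(1) << 11;   // assumed larval_shift == 11

  int main() {
    uint64_t m      = 0x405;                 // assumed always-locked prototype (LP64)
    uint64_t larval = m | larval_bit;        // enter_larval_state()
    uint64_t done   = larval & ~larval_bit;  // exit_larval_state()
    printf("%#llx -> %#llx -> %#llx\n",
           (unsigned long long)m, (unsigned long long)larval,
           (unsigned long long)done);         // 0x405 -> 0xc05 -> 0x405
    return 0;
  }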
 393 
 394   // Prototype mark for initialization
 395   static markOop prototype() {
 396     return markOop( no_hash_in_place | no_lock_in_place );
 397   }
 398 
 399   // Helper function for restoration of unmarked mark oops during GC
 400   static inline markOop prototype_for_object(oop obj);
 401 
 402   // Debugging
 403   void print_on(outputStream* st) const;
 404 
 405   // Prepare address of oop for placement into mark
 406   inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
 407 
 408   // Recover address of oop from encoded form used in mark
 409   inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); }
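encode_pointer_as_mark() relies on object addresses having their low two bits clear: the forwarding pointer is stored with the lock bits forced to 11 (marked_value), and decode_pointer() clears them again. A minimal standalone round trip, assuming a sufficiently aligned address:

  #include <cstdint>
  #include <cstdio>

  static uintptr_t encode(void* p)     { return ((uintptr_t)p & ~(uintptr_t)0x3) | 0x3; } // set_marked()
  static void*     decode(uintptr_t m) { return (void*)(m & ~(uintptr_t)0x3); }           // clear lock bits

  int main() {
    alignas(8) static int obj;                 // stand-in for an aligned object
    uintptr_t mark = encode(&obj);
    printf("low bits=%#llx roundtrip ok=%d\n",
           (unsigned long long)(mark & 0x3), decode(mark) == (void*)&obj);  // 0x3, 1
    return 0;
  }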
 410 
 411   // These markOops indicate cms free chunk blocks and not objects.
 412   // In 64 bit, the markOop is set to distinguish them from oops.
 413   // These are defined in 32 bit mode for vmStructs.
 414   const static uintptr_t cms_free_chunk_pattern  = 0x1;
 415 
 416   // Constants for the size field.
 417   enum { size_shift                = cms_shift + cms_bits,
 418          size_bits                 = 35    // needed for compressed oops 32G
 419        };
 420   // These values are too big for Win64
 421   const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
 422                                      NOT_LP64(0);
 423   const static uintptr_t size_mask_in_place =
 424                                      (address_word)size_mask << size_shift;
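As a quick arithmetic check on size_bits: 2^35 equals 32 * 2^30, which lines up with the "32G" note above. The sketch below only prints that bound and is not HotSpot code.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t two_pow_35 = UINT64_C(1) << 35;        // 34359738368 == 32 * 2^30
    printf("2^35 = %llu, max encodable size field = %llu\n",
           (unsigned long long)two_pow_35, (unsigned long long)(two_pow_35 - 1));
    return 0;
  }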
 425 
 426 #ifdef _LP64
 427   static markOop cms_free_prototype() {
 428     return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
 429                    ((cms_free_chunk_pattern & cms_mask) << cms_shift));