/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_MARKOOP_HPP
#define SHARE_OOPS_MARKOOP_HPP

#include "metaprogramming/integralConstant.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/oopsHierarchy.hpp"

// The markWord describes the header of an object.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
//  32 bits:
//  --------
//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
//             size:32 ------------------------------------------>| (CMS free block)
//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
//  64 bits:
//  --------
//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
//  size:64 ----------------------------------------------------->| (CMS free block)
//
//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
//    to make room for the age bits & the epoch bits (used in support of
//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
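//
//    A minimal illustrative sketch (not part of this header) of how the low
//    bits of a raw mark value map to the states listed above; the name m is
//    hypothetical:
//
//      uintptr_t m = ...;              // raw mark word value (hypothetical)
//      if      ((m & 0x7) == 0x5) { }  // biased: low three bits are 101
//      else if ((m & 0x3) == 0x0) { }  // stack-locked: m is the BasicLock*
//      else if ((m & 0x3) == 0x1) { }  // unlocked (neutral) header
//      else if ((m & 0x3) == 0x2) { }  // inflated: m ^ 2 is the ObjectMonitor*
//      else                       { }  // 0x3: marked by the GC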

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markWord {
 private:
  uintptr_t _value;

  // Poison - prevent casts and pointer conversions.
  // Use to_pointer and from_pointer instead.
  template<typename T> operator T();
  markWord(const volatile void*);

 public:
  explicit markWord(uintptr_t value) : _value(value) {}

  markWord() { /* uninitialized */ }

  // It is critical for performance that this class be trivially
  // destructible, copyable, and assignable.

  static markWord from_pointer(void* ptr) {
    return markWord((uintptr_t)ptr);
  }
  void* to_pointer() const {
    return (void*)_value;
  }

  bool operator==(const markWord& other) const {
    return _value == other._value;
  }
  bool operator!=(const markWord& other) const {
    return !operator==(other);
  }

  // Conversion
  uintptr_t value() const { return _value; }

  // Constants
  static const uintptr_t zero     = 0;

  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };
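
  // Illustrative arithmetic only, following from the widths above and the
  // layout diagram in the file header comment: on a 32-bit VM
  // max_hash_bits = 32 - 4 - 2 - 1 = 25, so hash_bits is 25; on a 64-bit VM
  // max_hash_bits = 64 - 4 - 2 - 1 = 57, which is clamped to 31.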

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };

  enum { lock_mask                 = right_n_bits(lock_bits),
         lock_mask_in_place        = lock_mask << lock_shift,
         biased_lock_mask          = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place = biased_lock_mask << lock_shift,
         biased_lock_bit_in_place  = 1 << biased_lock_shift,
         age_mask                  = right_n_bits(age_bits),
         age_mask_in_place         = age_mask << age_shift,
         epoch_mask                = right_n_bits(epoch_bits),
         epoch_mask_in_place       = epoch_mask << epoch_shift,
         cms_mask                  = right_n_bits(cms_bits),
         cms_mask_in_place         = cms_mask << cms_shift
  };

  const static uintptr_t hash_mask = right_n_bits(hash_bits);
  const static uintptr_t hash_mask_in_place = hash_mask << hash_shift;

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
  };

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5
  };

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markWord set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    return markWord(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markWord incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markWord biased_locking_prototype() {
    return markWord( biased_lock_pattern );
  }
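
  // Illustrative only: biased_locking_prototype().value() == biased_lock_pattern
  // == 0x5 (binary 101), i.e. an anonymously biased header (JavaThread* part is
  // zero) with age 0 and epoch 0, matching the "[0 | epoch | age | 1 | 01]" row
  // in the file header comment.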

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markWord while being inflated.
  // Code that looks at the mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markWord value - used when inflating over
  // an existing stack lock.  0 indicates the markWord is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markWord INFLATING() { return markWord(zero); }    // inflate-in-progress

  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special-case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the Klass* as argument instead)
  inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must be updated if the markWord layout changes.
  markWord set_unlocked() const {
    return markWord(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markWord displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markWord*)ptr;
  }
  void set_displaced_mark_helper(markWord m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    ((markWord*)ptr)->_value = m._value;
  }
  markWord copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return markWord(tmp);
  }
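  // Illustrative only: installing an identity hash h (hypothetical value) into
  // a mark m and reading it back with hash() further below;
  //
  //   markWord m2 = m.copy_set_hash(h);
  //   assert(m2.hash() == (h & hash_mask), "hash round-trips modulo the mask");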
  // This mark is only stored into a BasicLock, as the
  // indicator that the lock is using a heavyweight monitor.
  static markWord unused_mark() {
    return markWord(marked_value);
  }
  // The following two functions create the markWord to be
  // stored into the object header; they encode monitor info.
  static markWord encode(BasicLock* lock) {
    return from_pointer(lock);
  }
  static markWord encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return markWord(tmp | monitor_value);
  }
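  // Illustrative round trip only (assumes ObjectMonitor allocations leave the
  // two low bits of the pointer clear, as the tagging scheme requires); the
  // name mon is hypothetical:
  //
  //   markWord m = markWord::encode(mon);  // value() == (uintptr_t)mon | monitor_value
  //   assert(m.has_monitor(), "monitor tag bit is set");
  //   assert(m.monitor() == mon, "xor strips the tag again");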
  static markWord encode(JavaThread* thread, uint age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return markWord(tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markWord clear_lock_bits() { return markWord(value() & ~lock_mask_in_place); }

  // lock-bit mutators
  markWord set_marked()   { return markWord((value() & ~lock_mask_in_place) | marked_value); }
  markWord set_unmarked() { return markWord((value() & ~lock_mask_in_place) | unlocked_value); }

  // age operations
  uint    age()               const { return mask_bits(value() >> age_shift, age_mask); }
  markWord set_age(uint v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markWord((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
  }
  markWord incr_age()          const { return age() == max_age ? markWord(_value) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }

  // Prototype mark for initialization
  static markWord prototype() {
    return markWord( no_hash_in_place | no_lock_in_place );
  }
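  // Illustrative only: no_hash_in_place is 0 and no_lock_in_place is
  // unlocked_value, so prototype().value() == 1: an unlocked, unhashed
  // header with age 0.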

  // Helper function for restoration of unmarked mark oops during GC
  static inline markWord prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markWord encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }

  // These markWords indicate cms free chunk blocks and not objects.
  // In 64 bit, the markWord is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift                = cms_shift + cms_bits,
         size_bits                 = 35    // needed for compressed oops with 32G heaps
       };
  // These values are too big for Win64
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markWord cms_free_prototype() {
    return markWord(((intptr_t)prototype().value() & ~cms_mask_in_place) |
                    ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markWord set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markWord(((intptr_t)cms_free_prototype().value() & ~size_mask_in_place) |
                    (((intptr_t)size & size_mask) << size_shift));
  }
#endif // _LP64
};

// Support atomic operations.
template<>
struct PrimitiveConversions::Translate<markWord> : public TrueType {
  typedef markWord Value;
  typedef uintptr_t Decayed;

  static Decayed decay(const Value& x) { return x.value(); }
  static Value recover(Decayed x) { return Value(x); }
};
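
// Illustrative only (the exact Atomic template signatures differ across JDK
// versions, so this is a sketch rather than an API reference): the
// specialization above lets markWord values be handed directly to the Atomic
// operations, which decay them to uintptr_t and recover the result, e.g.
//
//   markWord old_mark = ...;                  // current header value
//   markWord new_mark = old_mark.incr_age();  // e.g. bump the age bits
//   // An Atomic::cmpxchg(...) on the header field then round-trips both
//   // markWords through PrimitiveConversions::Translate<markWord>.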

#endif // SHARE_OOPS_MARKOOP_HPP