/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/space.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "memory/universe.hpp"
#include "utilities/sizes.hpp"

class VMStructs;

class ShenandoahHeapRegion : public ContiguousSpace {
  friend class VMStructs;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    the heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated into groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .                      Trash ---------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is the first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. Subsequent allocations may also happen in regions that are already Regular,
    but not in Humongous ones.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of the Trash state allows
    quick reclamation without actually cleaning up the region.

    Transition from "Trash" to "Empty" is recycling. It cleans up the region and the corresponding
    metadata, and can be done asynchronously and in bulk.

    Note how the allowed transitions prevent logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it can never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go Pinned, and thus can be protected from moves (humongous continuations
         follow their associated humongous starts and are not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
   */
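
  // For illustration, derived from the diagram above: a typical lifecycle of an
  // ordinary region is Uncommitted -> Committed -> Regular (first allocation) ->
  // Collection Set (selected for evacuation) -> Trash (reclaimed) -> Committed
  // (recycled), after which the cycle can repeat.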

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
  };

  const char* region_state_to_string(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects against accidental changes in enum order:
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  void report_illegal_transition(const char* method);

public:
  // Allowed transitions from the outside code:
  void make_regular_allocation();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass();
  void make_humongous_cont_bypass();
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();
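
  // Illustrative sequence only (a sketch; the exact call sites live in the
  // collector code, not in this header): a region might see
  // make_regular_allocation() when it is first handed out for allocation,
  // make_cset() when it is selected for evacuation, make_trash() once its
  // live objects have been evacuated, and make_empty() / make_uncommitted()
  // when it is recycled.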

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_move_allowed()           const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
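  // For example, a Pinned region still admits allocations (is_alloc_allowed())
  // but is never allowed to move (is_move_allowed() is false), matching
  // transition rule (e) above.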

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

private:
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Global allocation counter, incremented for each allocation under the Shenandoah heap lock.
  // Padded to avoid false sharing with the read-only fields above.
  struct PaddedAllocSeqNum {
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t));
    uint64_t value;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
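
    // Layout note (inferred from the pad macro arguments): the leading pad plus
    // 'value' fill one cache line, and the trailing pad claims a full line after
    // it, so frequent updates to 'value' do not share a cache line with
    // neighboring data.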

    PaddedAllocSeqNum() {
      // start with 1, reserve 0 for uninitialized value
      value = 1;
    }
  };

  static PaddedAllocSeqNum _alloc_seq_num;

  // Never updated fields
  ShenandoahHeap* _heap;
  ShenandoahPacer* _pacer;
  MemRegion _reserved;
  size_t _region_number;

  // Rarely updated fields
  HeapWord* _new_top;
  size_t _critical_pins;
  double _empty_time;

  // Seldom updated fields
  RegionState _state;

  // Frequently updated fields
  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _shared_allocs;

  uint64_t _seqnum_first_alloc_mutator;
  uint64_t _seqnum_first_alloc_gc;
  uint64_t _seqnum_last_alloc_mutator;
  uint64_t _seqnum_last_alloc_gc;

  volatile size_t _live_data;

  // Claim some space at the end to protect the next region
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  static void setup_sizes(size_t initial_heap_size, size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

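  // For example, with a (hypothetical) region size of 1 MB, this is a ceiling
  // division by the region size: required_regions(1) == 1,
  // required_regions(1 MB) == 1, and required_regions(1 MB + 1) == 2.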
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  static uint64_t seqnum_current_alloc() {
    // Last used seq number
    return _alloc_seq_num.value - 1;
  }

  size_t region_number() const;

  // Allocation (return NULL if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);

  HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL)

  void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  bool has_live() const;
  size_t get_live_data_bytes() const;
  size_t get_live_data_words() const;

  void print_on(outputStream* st) const;

  size_t garbage() const;

  void recycle();

  void oop_iterate(OopIterateClosure* cl);

  HeapWord* block_start_const(const void* p) const;

  bool in_collection_set() const;

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL);
  void prepare_for_compaction(CompactPoint* cp)   shenandoah_not_implemented;
  void adjust_pointers()                          shenandoah_not_implemented;
  void compact()                                  shenandoah_not_implemented;

  void set_new_top(HeapWord* new_top) { _new_top = new_top; }
  HeapWord* new_top() const { return _new_top; }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata_to_shared();
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;

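  // In the seqnum accessors below, 0 means "no allocation recorded" for that
  // party (allocation seqnums start at 1, see PaddedAllocSeqNum), which is why
  // seqnum_first_alloc() special-cases zero values.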
  uint64_t seqnum_first_alloc() const {
    if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc;
    if (_seqnum_first_alloc_gc == 0)      return _seqnum_first_alloc_mutator;
    return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc);
  }

  uint64_t seqnum_last_alloc() const {
    return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc);
  }

  uint64_t seqnum_first_alloc_mutator() const {
    return _seqnum_first_alloc_mutator;
  }

  uint64_t seqnum_last_alloc_mutator()  const {
    return _seqnum_last_alloc_mutator;
  }

  uint64_t seqnum_first_alloc_gc() const {
    return _seqnum_first_alloc_gc;
  }

  uint64_t seqnum_last_alloc_gc()  const {
    return _seqnum_last_alloc_gc;
  }

private:
  void do_commit();
  void do_uncommit();

  void oop_iterate_objects(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl);

  inline void internal_increase_live_data(size_t s);
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP