/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/space.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "utilities/sizes.hpp"

class VMStructs;

// One fixed-size region of the Shenandoah heap. Each region carries its own
// lifecycle state (see the state machine below), liveness accounting, and
// per-allocation-path (TLAB/GCLAB/shared) allocation metadata. Region sizes
// are process-wide constants computed once by setup_sizes().
//
// NOTE: field declaration order below is deliberate (grouped by update
// frequency, with cache-line padding) and VMStructs references private
// members — do not reorder or rename fields without updating both.
class ShenandoahHeapRegion : public ContiguousSpace {
  friend class VMStructs;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .    |
      .              \---------v-----------/                          .    |
      .                        |                                      .    |
      .........................|.......................................    |
                               |                                           |
      "Active":                |                                           |
      .........................|.......................................    |
      .                        |                                      .    |
      .      /-----------------^-------------------\                  .    |
      .      |                                     |                  .    |
      .      v                                     v    "Humongous":  .    |
      .   Regular ---\-----\     ..................O................  .    |
      .     |  ^     |     |     .                 |               .  .    |
      .     |  |     |     |     .                 *---------\     .  .    |
      .     v  |     |     |     .                 v         v     .  .    |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .    |
      .       ^    / |     |     .  Pinned         v         |     .  .    |
      .       |   /  |     |     .                 *<--------/     .  .    |
      .       |  v   |     |     .                 |               .  .    |
      .  CsetPinned  |     |     ..................O................  .    |
      .              |     |                       |                  .    |
      .              \-----\---v-------------------/                  .    |
      .                        |                                      .    |
      .........................|.......................................    |
                               |                                           |
      "Trash":                 |                                           |
      .........................|.......................................    |
      .                        |                                      .    |
      .                        v                                      .    |
      .              Trash ---------------------------------------/   .
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
   */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
  };

  // Human-readable name for a region state, used by print_on() and friends.
  const char* region_state_to_string(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return "Empty Uncommitted";
      case _empty_committed:        return "Empty Committed";
      case _regular:                return "Regular";
      case _humongous_start:        return "Humongous Start";
      case _humongous_cont:         return "Humongous Continuation";
      case _pinned_humongous_start: return "Humongous Start, Pinned";
      case _cset:                   return "Collection Set";
      case _pinned:                 return "Pinned";
      case _pinned_cset:            return "Collection Set, Pinned";
      case _trash:                  return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects from accidental changes in enum order:
  // the externally visible ordinal is pinned here, so reordering RegionState
  // (or appending new states) does not silently change state_ordinal() values.
  // Note the deliberate mismatch with enum order for the last few states.
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Reports (presumably fatally — confirm in .cpp) an attempt to perform a
  // state transition not allowed by the state machine above.
  void report_illegal_transition(const char* method);

public:
  // Allowed transitions from the outside code:
  // (the "_bypass" variants presumably skip some of the usual checks for
  //  special phases such as full GC — confirm against the .cpp)
  void make_regular_allocation();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass();
  void make_humongous_cont_bypass();
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }

  // Macro-properties:
  // Allocations can go into empty, regular, or pinned-regular regions;
  // moves are allowed for regular/cset regions, and for humongous starts
  // only when ShenandoahHumongousMoves is enabled.
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_move_allowed()           const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

private:
  // Process-wide region geometry, computed once by setup_sizes().
  // Read-only after initialization.
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Global allocation counter, increased for each allocation under Shenandoah heap lock.
  // Padded to avoid false sharing with the read-only fields above.
  struct PaddedAllocSeqNum {
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t));
    uint64_t value;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

    PaddedAllocSeqNum() {
      // start with 1, reserve 0 for uninitialized value
      value = 1;
    }
  };

  static PaddedAllocSeqNum _alloc_seq_num;

  // Never updated fields
  ShenandoahHeap* _heap;          // owning heap
  MemRegion _reserved;            // memory covered by this region
  size_t _region_number;          // index of this region within the heap

  // Rarely updated fields
  HeapWord* _new_top;             // prospective top after compaction/fixup
  size_t _critical_pins;          // pin count; region stays pinned while > 0 — confirm semantics in .cpp
  double _empty_time;             // timestamp of when the region last became empty

  // Seldom updated fields
  RegionState _state;             // current state in the machine above; guarded by heap lock

  // Frequently updated fields
  size_t _tlab_allocs;            // words allocated via mutator TLABs
  size_t _gclab_allocs;           // words allocated via GC labs
  size_t _shared_allocs;          // words allocated via the shared (out-of-lab) path

  // First/last allocation sequence numbers, tracked separately for the
  // mutator and GC allocation paths; 0 means "no allocation recorded yet"
  // (seq numbers themselves start at 1, see PaddedAllocSeqNum).
  uint64_t _seqnum_first_alloc_mutator;
  uint64_t _seqnum_first_alloc_gc;
  uint64_t _seqnum_last_alloc_mutator;
  uint64_t _seqnum_last_alloc_gc;

  volatile size_t _live_data;     // live data accounting, updated concurrently

  // Claim some space at the end to protect next region
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed);

  // Lower bound on the number of regions the heap must be carved into.
  static const size_t MIN_NUM_REGIONS = 10;

  // Computes the static region geometry (sizes, shifts, masks, thresholds)
  // from the heap bounds. Must run before any accessor below is used.
  static void setup_sizes(size_t initial_heap_size, size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold 'bytes', rounding up.
  // Relies on region size being a power of two (shift == log2(bytes)).
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  // Allocations at or above this threshold are treated as humongous.
  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  static uint64_t seqnum_current_alloc() {
    // Last used seq number
    return _alloc_seq_num.value - 1;
  }

  size_t region_number() const;

  // Allocation (return NULL if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);

  // ContiguousSpace's untyped allocate is not supported; callers must state
  // the allocation type via the overload above.
  HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL)

  void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  bool has_live() const;
  size_t get_live_data_bytes() const;
  size_t get_live_data_words() const;

  void print_on(outputStream* st) const;

  // Reclaimable (non-live) space in this region.
  size_t garbage() const;

  // Trash -> Empty transition: clean up region and its metadata (see the
  // state machine diagram above).
  void recycle();

  void oop_iterate(OopIterateClosure* cl);

  HeapWord* block_start_const(const void* p) const;

  bool in_collection_set() const;

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  // Mark-compact hooks from CompactibleSpace are not used by Shenandoah.
  CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL);
  void prepare_for_compaction(CompactPoint* cp) shenandoah_not_implemented;
  void adjust_pointers() shenandoah_not_implemented;
  void compact() shenandoah_not_implemented;

  void set_new_top(HeapWord* new_top) { _new_top = new_top; }
  HeapWord* new_top() const { return _new_top; }

  // Per-path (TLAB/GCLAB/shared) allocation accounting.
  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata_to_shared();
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;

  // Earliest allocation seq number across both paths; a path with no
  // recorded allocation (0) is ignored.
  uint64_t seqnum_first_alloc() const {
    if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc;
    if (_seqnum_first_alloc_gc == 0) return _seqnum_first_alloc_mutator;
    return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc);
  }

  // Latest allocation seq number across both paths (0 if none recorded).
  uint64_t seqnum_last_alloc() const {
    return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc);
  }

  uint64_t seqnum_first_alloc_mutator() const {
    return _seqnum_first_alloc_mutator;
  }

  uint64_t seqnum_last_alloc_mutator()  const {
    return _seqnum_last_alloc_mutator;
  }

  uint64_t seqnum_first_alloc_gc() const {
    return _seqnum_first_alloc_gc;
  }

  uint64_t seqnum_last_alloc_gc()  const {
    return _seqnum_last_alloc_gc;
  }

private:
  // Back the region's memory with (or release) OS commitment.
  void do_commit();
  void do_uncommit();

  void oop_iterate_objects(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl);

  inline void internal_increase_live_data(size_t s);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP