/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/space.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "utilities/sizes.hpp"

class VMStructs;
class ShenandoahHeapRegionStateConstant;

// A ShenandoahHeapRegion is the unit of heap management in Shenandoah: the heap
// is carved into equal-sized regions whose geometry is computed once in
// setup_sizes(). Each region carries its life-cycle state (see the state
// machine below), per-region allocation and liveness bookkeeping, and a pin
// count; per the state machine, pinned regions never enter the collection set
// and thus never move. VMStructs is a friend so the serviceability agent can
// inspect the private fields; do not change field layout casually.
class ShenandoahHeapRegion : public ContiguousSpace {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .                      Trash ---------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
  */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

  // Human-readable name for a region state.
  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects from accidental changes in enum order:
  // the ordinals below stay stable even if RegionState entries are reordered.
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Failure path for the make_*() transitions below: called when a requested
  // transition is not allowed by the state machine above.
  void report_illegal_transition(const char* method);

public:
  // Number of distinct region states.
  static const int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  void make_regular_allocation();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass();
  void make_humongous_cont_bypass();
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  // Every state except _empty_uncommitted has its memory committed.
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  // Pin-count accounting; is_pinned() above reflects the resulting state.
  void record_pin();
  void record_unpin();
  size_t pin_count() const;

private:
  // Region geometry, computed once by setup_sizes(); read-only afterwards.
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Global allocation counter, increased for each allocation under Shenandoah heap lock.
  // Padded to avoid false sharing with the read-only fields above.
  struct PaddedAllocSeqNum {
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t));
    uint64_t value;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

    PaddedAllocSeqNum() {
      // start with 1, reserve 0 for uninitialized value
      value = 1;
    }
  };

  static PaddedAllocSeqNum _alloc_seq_num;

  // Never updated fields
  ShenandoahHeap* _heap;
  MemRegion _reserved;
  size_t _region_number;

  // Rarely updated fields
  HeapWord* _new_top;
  double _empty_time;

  // Seldom updated fields
  RegionState _state;

  // Frequently updated fields
  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _shared_allocs;

  // Per-region snapshots of _alloc_seq_num; 0 means "no allocation recorded"
  // (the global counter starts at 1, see PaddedAllocSeqNum).
  uint64_t _seqnum_first_alloc_mutator;
  uint64_t _seqnum_first_alloc_gc;
  uint64_t _seqnum_last_alloc_mutator;
  uint64_t _seqnum_last_alloc_gc;

  volatile size_t _live_data;
  volatile size_t _critical_pins;

  HeapWord* _update_watermark;

  // Claim some space at the end to protect next region
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  static void setup_sizes(size_t max_heap_size);

  // NOTE(review): presumably the timestamp recorded when this region last
  // became empty -- confirm against the .cpp before relying on semantics.
  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold "bytes" bytes, rounded up to whole regions.
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  static uint64_t seqnum_current_alloc() {
    // Last used seq number
    return _alloc_seq_num.value - 1;
  }

  size_t region_number() const;

  // Allocation (return NULL if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);

  // Single-argument ContiguousSpace::allocate is deliberately unimplemented;
  // callers must use the typed overload above.
  HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL)

  void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  bool has_live() const;
  size_t get_live_data_bytes() const;
  size_t get_live_data_words() const;

  void print_on(outputStream* st) const;

  size_t garbage() const;

  void recycle();

  void oop_iterate(OopIterateClosure* cl);

  HeapWord* block_start_const(const void* p) const;

  bool in_collection_set() const;

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  // CompactibleSpace API that Shenandoah does not use:
  CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL);
  void prepare_for_compaction(CompactPoint* cp)   shenandoah_not_implemented;
  void adjust_pointers()                          shenandoah_not_implemented;
  void compact()                                  shenandoah_not_implemented;

  void set_new_top(HeapWord* new_top) { _new_top = new_top; }
  HeapWord* new_top() const { return _new_top; }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata_to_shared();
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;

  // Earliest allocation seq number over mutator and GC allocations; a zero
  // counter means that side has no allocation recorded (seq numbers start
  // at 1, see PaddedAllocSeqNum), so it is skipped rather than minimized.
  uint64_t seqnum_first_alloc() const {
    if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc;
    if (_seqnum_first_alloc_gc == 0) return _seqnum_first_alloc_mutator;
    return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc);
  }

  // Latest allocation seq number over mutator and GC allocations
  // (zero only if neither side has allocated).
  uint64_t seqnum_last_alloc() const {
    return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc);
  }

  uint64_t seqnum_first_alloc_mutator() const {
    return _seqnum_first_alloc_mutator;
  }

  uint64_t seqnum_last_alloc_mutator() const {
    return _seqnum_last_alloc_mutator;
  }

  uint64_t seqnum_first_alloc_gc() const {
    return _seqnum_first_alloc_gc;
  }

  uint64_t seqnum_last_alloc_gc() const {
    return _seqnum_last_alloc_gc;
  }

  // Update watermark, always kept within [bottom(), top()].
  // NOTE(review): presumably the limit up to which references in this region
  // have been updated -- confirm against the users of this accessor.
  HeapWord* get_update_watermark() const {
    assert(bottom() <= _update_watermark && _update_watermark <= top(), "within bounds");
    return _update_watermark;
  }

  void set_update_watermark(HeapWord* w) {
    assert(bottom() <= w && w <= top(), "within bounds");
    _update_watermark = w;
  }

private:
  void do_commit();
  void do_uncommit();

  void oop_iterate_objects(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl);

  inline void internal_increase_live_data(size_t s);

  // All state changes funnel through here; transitions are validated against
  // the state machine documented at the top of this class.
  void set_state(RegionState to);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP