/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "memory/space.hpp"
#include "gc_implementation/shenandoah/shenandoahAllocRequest.hpp"
#include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
#include "gc_implementation/shenandoah/shenandoahPacer.hpp"

class VMStructs;

// A single Shenandoah heap region: a fixed-size slice of the heap that is the
// unit of allocation, evacuation (collection set membership), pinning, and
// commit/uncommit. Its lifecycle is governed by the state machine documented
// below; all state transitions are expected to happen under the heap lock.
class ShenandoahHeapRegion : public ContiguousSpace {
  friend class VMStructs;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <--------------------------\
      .              |                     |                          .     |
      .              \---------v-----------/                          .     |
      .                        |                                      .     |
      .........................|.......................................     |
                               |                                            |
      "Active":                |                                            |
      .........................|.......................................     |
      .                        |                                      .     |
      .      /-----------------^-------------------\                  .     |
      .      |                                     |                  .     |
      .      v                                     v    "Humongous":  .     |
      .   Regular ---\-----\     ..................O................  .     |
      .     |  ^     |     |     .                 |               .  .     |
      .     |  |     |     |     .                 *---------\     .  .     |
      .     v  |     |     |     .                 v         v     .  .     |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .     |
      .       ^    / |     |     .  Pinned         v         |     .  .     |
      .       |   /  |     |     .                 *<--------/     .  .     |
      .       |  v   |     |     .                 |               .  .     |
      .  CsetPinned  |     |     ..................O................  .     |
      .              |     |                       |                  .     |
      .              \-----\---v-------------------/                  .     |
      .                        |                                      .     |
      .........................|.......................................     |
                               |                                            |
      "Trash":                 |                                            |
      .........................|.......................................     |
      .                        |                                      .     |
      .                        v                                      .     |
      .              Trash -------------------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
  */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
  };

  // Human-readable name for a region state, for logging/printing.
  const char* region_state_to_string(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects from accidental changes in enum order:
  // the ordinal is fixed per state, independent of the enum declaration order,
  // so external consumers of state_ordinal() keep stable values.
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Reports (and presumably aborts on) a state transition not permitted by the
  // state machine above; "method" names the attempted transition.
  void report_illegal_transition(const char* method);

public:
  // Allowed transitions from the outside code:
  void make_regular_allocation();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass();
  void make_humongous_cont_bypass();
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_move_allowed()           const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  // Raw state and its stable ordinal (see region_state_to_ordinal).
  RegionState state()              const { return _state; }
  int state_ordinal()              const { return region_state_to_ordinal(_state); }

private:
  // Global region geometry, computed once in setup_sizes(). Shift/mask
  // variants exist because region sizes are used as powers of two
  // (see required_regions below, which divides via the bytes shift).
  static size_t RegionCount;              // total number of regions in the heap
  static size_t RegionSizeBytes;          // region size, in bytes
  static size_t RegionSizeWords;          // region size, in heap words
  static size_t RegionSizeBytesShift;     // log2 of RegionSizeBytes
  static size_t RegionSizeWordsShift;     // log2 of RegionSizeWords
  static size_t RegionSizeBytesMask;      // RegionSizeBytes - 1
  static size_t RegionSizeWordsMask;      // RegionSizeWords - 1
  static size_t HumongousThresholdBytes;  // allocations above this go humongous
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;         // cap for TLAB sizes within a region
  static size_t MaxTLABSizeWords;

  // Never updated fields
  ShenandoahHeap* _heap;                  // owning heap (back-pointer)
  MemRegion _reserved;                    // address range this region covers
  size_t _region_number;                  // index of this region within the heap

  // Rarely updated fields
  HeapWord* _new_top;                     // staged top, published via set_new_top/new_top
  size_t _critical_pins;                  // pin count; presumably JNI critical sections — confirm in .cpp
  double _empty_time;                     // timestamp of when the region last became empty

  // Seldom updated fields
  RegionState _state;                     // current state machine position; guarded by heap lock

  // Frequently updated fields: per-type allocation accounting (words or bytes
  // is not visible here — see adjust_alloc_metadata in the inline file).
  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _shared_allocs;

  // Live data counter, updated concurrently (hence volatile + internal_increase_live_data).
  volatile jint _live_data;

  // Claim some space at the end to protect next region
  char _pad0[DEFAULT_CACHE_LINE_SIZE];

public:
  // Constructs a region covering [start, start + size_words) with the given
  // index; "committed" tells whether backing memory starts committed.
  ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed);

  // Lower bound on the region count; setup_sizes() presumably enforces it.
  static const size_t MIN_NUM_REGIONS = 10;

  // Computes the static region geometry above from the heap size bounds.
  static void setup_sizes(size_t initial_heap_size, size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold "bytes" (ceiling division by region size;
  // relies on region size being a power of two).
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  size_t region_number() const;

  // Allocation (return NULL if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);

  // ContiguousSpace's single-argument allocate is deliberately not supported:
  // callers must state the allocation type via the overload above.
  HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL)

  void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  bool has_live() const;
  size_t get_live_data_bytes() const;
  size_t get_live_data_words() const;

  void print_on(outputStream* st) const;

  // Amount of reclaimable (non-live) data in this region.
  size_t garbage() const;

  // Trash -> Empty transition work: clean up region and metadata (see diagram).
  void recycle();

  // Closure-based iteration is not supported on Shenandoah regions.
  void oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects) shenandoah_not_implemented;
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl) shenandoah_not_implemented_return(NULL);

  HeapWord* block_start_const(const void* p) const;

  bool in_collection_set() const;

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  void set_new_top(HeapWord* new_top) { _new_top = new_top; }
  HeapWord* new_top() const { return _new_top; }

  // Per-allocation-type bookkeeping for _tlab/_gclab/_shared_allocs.
  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata_to_shared();
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;

private:
  // Commit/uncommit the backing memory for this region.
  void do_commit();
  void do_uncommit();

  // Shared implementation behind the two increase_live_data_* entry points.
  inline void internal_increase_live_data(size_t s);
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP