/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP

#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CMSCollector;
class CompactibleFreeListSpace;
class ConcurrentMarkSweepGeneration;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 public:
  AFLBinaryTreeDictionary(MemRegion mr)
      : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {}

  // Find the list with size "size" in the binary tree and update
  // the statistics in the list according to "split" (chunk was
  // split or coalesced) and "birth" (chunk was added or removed).
  void dict_census_update(size_t size, bool split, bool birth);
  // Return true if the dictionary is overpopulated (more chunks of
  // this size than desired) for size "size".
  bool coal_dict_over_populated(size_t size);
  // Methods called at the beginning of a sweep to prepare the
  // statistics for the sweep.
  void begin_sweep_dict_census(double coalSurplusPercent,
                               float inter_sweep_current,
                               float inter_sweep_estimate,
                               float intra_sweep_estimate);
  // Methods called after the end of a sweep to modify the
  // statistics for the sweep.
  void end_sweep_dict_census(double splitSurplusPercent);
  // Accessors for statistics
  void set_tree_surplus(double splitSurplusPercent);
  void set_tree_hints(void);
  // Reset statistics for all the lists in the tree.
  void clear_tree_census(void);
  // Print the statistics for all the lists in the tree.  Also may
  // print out summaries.
  void print_dict_census(outputStream* st) const;
};

class LinearAllocBlock {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
                       _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
           size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CompactibleFreeListSpaceLAB;
  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
#if INCLUDE_SERIALGC
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
#endif

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _old_gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this then use _sLAB
    SmallForDictionary  = 257,                // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static size_t IndexSetStart;
  static size_t IndexSetStride;
  static size_t _min_chunk_size_in_bytes;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;

  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  AFLBinaryTreeDictionary* _dictionary;  // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;  // Use best fit strategy

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Used to make the young collector update the mod union table
  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize").  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the global
  // list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks.  Returns that
  // evenly splittable chunk.  May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split-off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics(const char* title) const;
  void reportIndexedFreeListStatistics(outputStream* st) const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return end();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_is_obj(addr);  // Avoid virtual call
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_size(addr);    // Avoid virtual call
  }

  inline size_t adjust_obj_size(size_t size) const {
    return adjustObjectSize(size);
  }

  inline size_t obj_size(const HeapWord* addr) const;

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
  const size_t max_flag_size_for_task_size() const;
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }

  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(OopIterateClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                     CardTable::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
                                 " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSBs having the value [11] indicate that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print() const;
  void print_on(outputStream* st) const;
  void prepare_for_verify();
  void verify() const;
  void verifyFreeLists() const PRODUCT_RETURN;
  void verifyIndexedFreeLists() const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //     EITHER
  //       . left-hand chunk is of a size that is coal-overpopulated
  //     OR
  //       . right-hand chunk is close-to-mountain
  //
  // (A small worked example of this arithmetic follows the class
  // declaration below.)
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};
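// Illustrative example of the coalescing census arithmetic described in the
// comments above, using hypothetical numbers (not taken from any run):
//
//   Suppose, for some chunk size, num-prev = 120, split-births = 40,
//   split-deaths = 10 and num-curr = 100.  Then
//
//     num-desired = 120 + 40 - 10 - 100 = 50
//
//   With coal-surplus = 1.05, a list of that size is coal-overpopulated
//   when its current population num-len >= 50 * 1.05 = 52.5, i.e. when it
//   holds 53 or more chunks; left-hand chunks of that size are then
//   candidates for coalescing with their right-hand neighbors during
//   the sweep.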
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

 public:
  static const int _default_dynamic_old_plab_size = 16;
  static const int _default_static_old_plab_size  = 50;

  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP