/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP

#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this then use _sLAB
    SmallForDictionary  = 257,                // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static size_t IndexSetStart;
  static size_t IndexSetStride;
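
  // Illustrative example (not part of the original comments; the request
  // sizes are made up): going by the size thresholds above, a 10-word
  // request would typically be served from the small linear allocation
  // block, a 100-word request from the indexed free lists, and a
  // 300-word request from the dictionary of large blocks.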

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  AFLBinaryTreeDictionary* _dictionary;  // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;         // Use best fit strategy
  bool _adaptive_freelists;  // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".) If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block. Do
  // not attempt to refill if the space is not available; return NULL. Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at the end of the space if possible, and
  // updating the block census stats as well as the block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. Return of "NULL" indicates that the
  // iteration completed.
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy.
  bool should_concurrent_collect() const;

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //     EITHER
  //   . left-hand chunk is of a size that is coal-overpopulated
  //     OR
  //   . right-hand chunk is close-to-mountain
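  //
  // Illustrative worked example (the numbers are made up, not taken from
  // a real census): suppose that for some size num-prev = 100,
  // split-births = 20, split-deaths = 10 and num-curr = 90. Then
  // num-desired = 100 + 20 - 10 - 90 = 20, and with coal-surplus = 1.05
  // the list is coal-overpopulated once its population num-len reaches
  // 21 (>= 20 * 1.05), at which point a left-hand chunk of this size
  // becomes eligible for coalescing.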
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP