/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP

#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/space.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CMSCollector;
class CompactibleFreeListSpace;
class ConcurrentMarkSweepGeneration;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;
  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this then use _sLAB
    SmallForDictionary  = 257,                // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static size_t IndexSetStart;
  static size_t IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;          // Use best fit strategy
  bool _adaptive_freelists;   // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Used to make the young collector update the mod union table
  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks.  Returns that
  // evenly splittable chunk.  May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
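  // (Illustrative note: "repairing" typically amounts to giving the unused
  // remainder of a LinAB a valid block identity -- e.g. treating it as a
  // free chunk -- so that the space remains parsable; see the corresponding
  // .cpp file for the exact handling.)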
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return end();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
  }

  inline size_t adjust_obj_size(size_t size) const {
    return adjustObjectSize(size);
  }

  inline size_t obj_size(const HeapWord* addr) const {
    return adjustObjectSize(oop(addr)->size());
  }

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  virtual bool isCompactibleFreeListSpace() { return true; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
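  // (Descriptive note: the space is divided into chunks of work of
  // rescan_task_size() and marking_task_size() words; worker threads claim
  // these sub-tasks dynamically through the sequential-subtasks structures
  // (par_seq_tasks / conc_par_seq_tasks()) that the
  // initialize_sequential_subtasks_for_* calls below set up.)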
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }

  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
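  //
  // Illustrative use (hypothetical caller; the names below are not part of
  // this interface):
  //   HeapWord* restart = cfls->object_iterate_careful_m(mr, &cl);
  //   if (restart != NULL) {
  //     // [restart, mr.end()) was not scanned; retry once that part of the
  //     // region has become parseable again.
  //   }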
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
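  // Worked example (illustrative; the concrete values of MinChunkSize and the
  // object alignment are build/runtime dependent): a request of 2 words is
  // first raised to MinChunkSize and then rounded up by align_object_size();
  // a request that is already at least MinChunkSize is only aligned.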
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy.
  bool should_concurrent_collect() const;

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void pms_prepare_for_compaction_work(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  void pms_compact_work();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or the
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //     EITHER
  //       . left-hand chunk is of a size that is coal-overpopulated
  //     OR
  //       . right-hand chunk is close-to-mountain
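  //
  // Worked example of the census arithmetic above (illustrative numbers):
  //
  //   num-prev = 100, split-births = 30, split-deaths = 10, num-curr = 60
  //   num-desired = 100 + 30 - 10 - 60 = 60
  //
  //   With coal-surplus = 1.05, a free list of this size is
  //   coal-overpopulated once its population num-len >= 60 * 1.05 = 63.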
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

 public:
  static const int _default_dynamic_old_plab_size = 16;
  static const int _default_static_old_plab_size  = 50;

  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP