/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP

#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/space.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CMSCollector;
class CompactibleFreeListSpace;
class ConcurrentMarkSweepGeneration;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // Largest size that will be allocated

  void print_on(outputStream* st) const;
};
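
// Illustrative note (the numbers below are hypothetical, not defaults):
// a call such as
//
//   blk.set(ptr, 1024, 2048, 256);
//
// records a linear allocation block of 1024 free words starting at ptr,
// a refill increment of 2048 words, and 256 words as the largest request
// the block will be used to satisfy (see the field comments above).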

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;
  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static size_t IndexSetStart;
  static size_t IndexSetStride;
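
  // Illustrative note (restating the constants above): an allocation
  // request smaller than SmallForLinearAlloc (16 words) is a candidate for
  // the small linear allocation block; a request smaller than
  // SmallForDictionary (257 words) is normally satisfied from the indexed
  // free list for that exact size; larger requests go to the dictionary of
  // large blocks.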

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpaces are allocated in global total order
  static int   _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool       _fitStrategy;        // Use best fit strategy
  bool       _adaptive_freelists; // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord*  _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Used to make the young collector update the mod union table
  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates the
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks.  Returns that
  // evenly splittable chunk.  May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split-off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void       removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at the end of the space if possible, and
  // updating the block census stats as well as the block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void       returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void       returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void       repairLinearAllocationBlocks();
  void       repairLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void       refillLinearAllocBlocksIfNeeded();

  void       verify_objects_initialized() const;

  // Statistics reporting helper functions
  void       reportFreeListStatistics() const;
  void       reportIndexedFreeListStatistics() const;
  size_t     maxChunkSizeInIndexedFreeLists() const;
  size_t     numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return end();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
  }

  inline size_t scanned_block_size(const HeapWord* addr) const {
    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
  }

  inline size_t adjust_obj_size(size_t size) const {
    return adjustObjectSize(size);
  }

  inline size_t obj_size(const HeapWord* addr) const {
    return adjustObjectSize(oop(addr)->size());
  }

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t     totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }

  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of the highest allocated
  // address and correcting it at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The use of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space to hold references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  HeapWord* object_iterate_careful_m(MemRegion mr,
                                     ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSBs having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool       linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
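  // For example (illustrative only): if MinChunkSize were 4 words,
  // adjustObjectSize(2) would return 4 (raised to the minimum free block
  // size), while a size already at or above the minimum is simply rounded
  // up by align_object_size().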
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for decisions regarding concurrent collection policy.
  bool should_concurrent_collect() const;

  // Support for compaction.
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support.
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency()      const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current number of free chunks, the number of chunks
  // created as a result of splitting a larger chunk or
  // coalescing smaller chunks, etc.  The counts in the
  // census are used to make decisions about splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

// Record (for each size):
//
//   split-births = #chunks added due to splits in (prev-sweep-end,
//      this-sweep-start)
//   split-deaths = #chunks removed for splits in (prev-sweep-end,
//      this-sweep-start)
//   num-curr     = #chunks at start of this sweep
//   num-prev     = #chunks at end of previous sweep
//
// The above are quantities that are measured. Now define:
//
//   num-desired := num-prev + split-births - split-deaths - num-curr
//
// Roughly, num-prev + split-births is the supply,
// split-deaths is demand due to other sizes
// and num-curr is what we have left.
//
// Thus, num-desired is roughly speaking the "legitimate demand"
// for blocks of this size and what we are striving to reach at the
// end of the current sweep.
//
// For a given list, let num-len be its current population.
// Define, for a free list of a given size:
//
//   coal-overpopulated := num-len >= num-desired * coal-surplus
// (coal-surplus is set to 1.05, i.e. we allow a little slop when
// coalescing -- we do not coalesce unless we think that the current
// supply has exceeded the estimated demand by more than 5%).
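//
// For example (illustrative numbers): if for some size num-prev = 100,
// split-births = 20, split-deaths = 10 and num-curr = 60, then
// num-desired = 100 + 20 - 10 - 60 = 50; with coal-surplus = 1.05, a list
// whose current population num-len is 60 is coal-overpopulated, since
// 60 >= 50 * 1.05 = 52.5.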
//
// For the set of sizes in the binary tree, which is neither dense nor
// closed, it may be the case that for a particular size we have never
// had, or do not now have, or did not have at the previous sweep,
// chunks of that size. We need to extend the definition of
// coal-overpopulated to such sizes as well:
//
//   For a chunk in/not in the binary tree, extend coal-overpopulated
//   defined above to include all sizes as follows:
//
//   . a size that is non-existent is coal-overpopulated
//   . a size that has a num-desired <= 0 as defined above is
//     coal-overpopulated.
//
// Also define, for a chunk heap-offset C and mountain heap-offset M:
//
//   close-to-mountain := C >= 0.99 * M
//
// Now, the coalescing strategy is:
//
//    Coalesce left-hand chunk with right-hand chunk if and
//    only if:
//
//      EITHER
//        . left-hand chunk is of a size that is coal-overpopulated
//      OR
//        . right-hand chunk is close-to-mountain
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

public:
  static const int _default_dynamic_old_plab_size = 16;
  static const int _default_static_old_plab_size  = 50;

  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP