58 _refillSize = refill_size;
59 _allocation_size_limit = allocation_size_limit;
60 }
61 HeapWord* _ptr;
62 size_t _word_size;
63 size_t _refillSize;
64 size_t _allocation_size_limit; // Largest size that will be allocated
65
66 void print_on(outputStream* st) const;
67 };
68
69 // Concrete subclass of CompactibleSpace that implements
70 // a free list space, such as used in the concurrent mark sweep
71 // generation.
72
73 class CompactibleFreeListSpace: public CompactibleSpace {
74   friend class VMStructs;
75   friend class ConcurrentMarkSweepGeneration;
76   friend class CMSCollector;
77   // Local alloc buffer for promotion into this space.
78   friend class CFLS_LAB;
79   // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
80   template <typename SpaceType>
81   friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
82   template <typename SpaceType>
83   friend void CompactibleSpace::scan_and_compact(SpaceType* space);
84   template <typename SpaceType>
85   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
86
87   // "Size" of chunks of work (executed during parallel remark phases
88   // of CMS collection); this probably belongs in CMSCollector, although
89   // it's cached here because it's used in
90   // initialize_sequential_subtasks_for_rescan() which modifies
91   // par_seq_tasks which also lives in Space. XXX
92   const size_t _rescan_task_size;
93   const size_t _marking_task_size;
94
95   // Yet another sequential tasks done structure. This supports
96   // CMS GC, where we have threads dynamically
97   // claiming sub-tasks from a larger parallel task.
98   SequentialSubTasksDone _conc_par_seq_tasks;
    // NOTE(review): original file lines 99-644 are elided from this excerpt;
    // the class body resumes below at original line 645 (mid-comment).
645   //
646   // EITHER
647   //   . left-hand chunk is of a size that is coal-overpopulated
648   // OR
649   //   . right-hand chunk is close-to-mountain
    // Statistics hooks recording "birth"/"death" events for chunks produced
    // by coalescing or splitting; each takes the chunk size in words.
650   void       smallCoalBirth(size_t size);
651   void       smallCoalDeath(size_t size);
652   void       coalBirth(size_t size);
653   void       coalDeath(size_t size);
654   void       smallSplitBirth(size_t size);
655   void       smallSplitDeath(size_t size);
    // NOTE(review): naming is mixed here (snake_case split_birth vs.
    // camelCase splitDeath); declarations kept as-is since the
    // definitions and callers elsewhere depend on these exact names.
656   void       split_birth(size_t size);
657   void       splitDeath(size_t size);
658   void       split(size_t from, size_t to1);
659
    // Presumably a free-list-space fragmentation metric — confirm against
    // the corresponding .cpp definition.
660   double flsFrag() const;
661 };
662
663 // A parallel-GC-thread-local allocation buffer for allocation into a
664 // CompactibleFreeListSpace.
665 class CFLS_LAB : public CHeapObj<mtGC> {
666   // The space that this buffer allocates into.
667   CompactibleFreeListSpace* _cfls;
668
669   // Our local free lists.
670   AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
671
672   // Initialized from a command-line arg.
    // NOTE(review): the comment above appears orphaned — no field follows
    // it in this excerpt; verify against the full file whether the member
    // it described was removed.
673
674   // Allocation statistics in support of dynamic adjustment of
675   // #blocks to claim per get_from_global_pool() call below.
    // All four arrays below are indexed per size class, the same indexing
    // as _indexedFreeList; the static ones aggregate across all LABs.
676   static AdaptiveWeightedAverage
677                  _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
678   static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
679   static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
    // Per-LAB (non-static) block count, one slot per size class.
680   size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
681
682   // Internal work method
    // Presumably refills the local free list 'fl' with blocks of
    // 'word_sz' words taken from _cfls — confirm in the .cpp.
683   void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
684
685  public:
    // Default per-size-class claim counts for dynamic vs. static
    // OldPLAB sizing (see compute_desired_plab_size()).
686   static const int _default_dynamic_old_plab_size = 16;
687   static const int _default_static_old_plab_size  = 50;
688
689   CFLS_LAB(CompactibleFreeListSpace* cfls);
690
691   // Allocate and return a block of the given size, or else return NULL.
692   HeapWord* alloc(size_t word_sz);
693
694   // Return any unused portions of the buffer to the global pool.
695   void retire(int tid);
696
697   // Dynamic OldPLABSize sizing
698   static void compute_desired_plab_size();
699   // When the settings are modified from default static initialization
700   static void modify_initialization(size_t n, unsigned wt);
701 };
702
// Size, in heap words, of the block used to refill the promotion
// spooling area: one SpoolBlock header plus room for CMSSpoolBlockSize
// displaced mark words, rounded per the free-list space's object-size
// adjustment rules.
703 size_t PromotionInfo::refillSize() const {
704   const size_t CMSSpoolBlockSize = 256;
    // Byte total (header + mark-word slots) converted to heap words.
705   const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
706                                    * CMSSpoolBlockSize);
707   return CompactibleFreeListSpace::adjustObjectSize(sz);
708 }
709
|
58 _refillSize = refill_size;
59 _allocation_size_limit = allocation_size_limit;
60 }
61 HeapWord* _ptr;
62 size_t _word_size;
63 size_t _refillSize;
64 size_t _allocation_size_limit; // Largest size that will be allocated
65
66 void print_on(outputStream* st) const;
67 };
68
69 // Concrete subclass of CompactibleSpace that implements
70 // a free list space, such as used in the concurrent mark sweep
71 // generation.
72
73 class CompactibleFreeListSpace: public CompactibleSpace {
74   friend class VMStructs;
75   friend class ConcurrentMarkSweepGeneration;
76   friend class CMSCollector;
77   // Local alloc buffer for promotion into this space.
78   friend class CompactibleFreeListSpaceLAB;
79   // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
80   template <typename SpaceType>
81   friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
82   template <typename SpaceType>
83   friend void CompactibleSpace::scan_and_compact(SpaceType* space);
84   template <typename SpaceType>
85   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
86
87   // "Size" of chunks of work (executed during parallel remark phases
88   // of CMS collection); this probably belongs in CMSCollector, although
89   // it's cached here because it's used in
90   // initialize_sequential_subtasks_for_rescan() which modifies
91   // par_seq_tasks which also lives in Space. XXX
92   const size_t _rescan_task_size;
93   const size_t _marking_task_size;
94
95   // Yet another sequential tasks done structure. This supports
96   // CMS GC, where we have threads dynamically
97   // claiming sub-tasks from a larger parallel task.
98   SequentialSubTasksDone _conc_par_seq_tasks;
    // NOTE(review): original file lines 99-644 are elided from this excerpt;
    // the class body resumes below at original line 645 (mid-comment).
645   //
646   // EITHER
647   //   . left-hand chunk is of a size that is coal-overpopulated
648   // OR
649   //   . right-hand chunk is close-to-mountain
    // Statistics hooks recording "birth"/"death" events for chunks produced
    // by coalescing or splitting; each takes the chunk size in words.
650   void       smallCoalBirth(size_t size);
651   void       smallCoalDeath(size_t size);
652   void       coalBirth(size_t size);
653   void       coalDeath(size_t size);
654   void       smallSplitBirth(size_t size);
655   void       smallSplitDeath(size_t size);
    // NOTE(review): naming is mixed here (snake_case split_birth vs.
    // camelCase splitDeath); declarations kept as-is since the
    // definitions and callers elsewhere depend on these exact names.
656   void       split_birth(size_t size);
657   void       splitDeath(size_t size);
658   void       split(size_t from, size_t to1);
659
    // Presumably a free-list-space fragmentation metric — confirm against
    // the corresponding .cpp definition.
660   double flsFrag() const;
661 };
662
663 // A parallel-GC-thread-local allocation buffer for allocation into a
664 // CompactibleFreeListSpace.
665 class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
666   // The space that this buffer allocates into.
667   CompactibleFreeListSpace* _cfls;
668
669   // Our local free lists.
670   AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
671
672   // Initialized from a command-line arg.
    // NOTE(review): the comment above appears orphaned — no field follows
    // it in this excerpt; verify against the full file whether the member
    // it described was removed.
673
674   // Allocation statistics in support of dynamic adjustment of
675   // #blocks to claim per get_from_global_pool() call below.
    // All four arrays below are indexed per size class, the same indexing
    // as _indexedFreeList; the static ones aggregate across all LABs.
676   static AdaptiveWeightedAverage
677                  _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
678   static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
679   static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
    // Per-LAB (non-static) block count, one slot per size class.
680   size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
681
682   // Internal work method
    // Presumably refills the local free list 'fl' with blocks of
    // 'word_sz' words taken from _cfls — confirm in the .cpp.
683   void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
684
685  public:
    // Default per-size-class claim counts for dynamic vs. static
    // OldPLAB sizing (see compute_desired_plab_size()).
686   static const int _default_dynamic_old_plab_size = 16;
687   static const int _default_static_old_plab_size  = 50;
688
689   CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
690
691   // Allocate and return a block of the given size, or else return NULL.
692   HeapWord* alloc(size_t word_sz);
693
694   // Return any unused portions of the buffer to the global pool.
695   void retire(int tid);
696
697   // Dynamic OldPLABSize sizing
698   static void compute_desired_plab_size();
699   // When the settings are modified from default static initialization
700   static void modify_initialization(size_t n, unsigned wt);
701 };
702
// Size, in heap words, of the block used to refill the promotion
// spooling area: one SpoolBlock header plus room for CMSSpoolBlockSize
// displaced mark words, rounded per the free-list space's object-size
// adjustment rules.
703 size_t PromotionInfo::refillSize() const {
704   const size_t CMSSpoolBlockSize = 256;
    // Byte total (header + mark-word slots) converted to heap words.
705   const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
706                                    * CMSSpoolBlockSize);
707   return CompactibleFreeListSpace::adjustObjectSize(sz);
708 }
709
|