97 // committed) for generation.
98 // Used by card marking code. Must not overlap with address ranges of
99 // other generations.
100 MemRegion _reserved;
101
102 // Memory area reserved for generation
103 VirtualSpace _virtual_space;
104
105 // Level in the generation hierarchy.
// NOTE(review): presumably 0 is the youngest level — confirm against the
// heap-configuration code before relying on it.
106 int _level;
107
108 // ("Weak") Reference processing support
109 ReferenceProcessor* _ref_processor;
110
111 // Performance Counters
112 CollectorCounters* _gc_counters;
113
114 // Statistics for garbage collection
115 GCStats* _gc_stats;
116
117 // Returns the next generation in the configuration, or else NULL if this
118 // is the highest generation.
119 Generation* next_gen() const;
121 // Initialize the generation.
122 Generation(ReservedSpace rs, size_t initial_byte_size, int level);
123
124 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
125 // "sp" that point into younger generations.
126 // The iteration is only over objects allocated at the start of the
127 // iteration; objects allocated as a result of applying the closure are
128 // not included.
129 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
130
131 public:
132 // The set of possible generation kinds.
133 enum Name {
134 DefNew,
135 ParNew,
136 MarkSweepCompact,
137 ConcurrentMarkSweep,
138 Other
139 };
140
141 enum SomePublicConstants {
142 // Generations are GenGrain-aligned and have sizes that are multiples of
462 // Some generations may require some cleanup actions before allowing
463 // a verification.
464 virtual void prepare_for_verify() {}
465
466 // Accessing "marks".
467
468 // This function gives a generation a chance to note a point between
469 // collections. For example, a contiguous generation might note the
470 // beginning allocation point post-collection, which might allow some later
471 // operations to be optimized.
472 virtual void save_marks() {}
473
474 // This function allows generations to initialize any "saved marks". That
475 // is, should only be called when the generation is empty.
476 virtual void reset_saved_marks() {}
477
478 // This function is "true" iff no allocations have occurred in the
479 // generation since the last call to "save_marks".
480 virtual bool no_allocs_since_save_marks() = 0;
482 // Apply "cl->apply" to (the addresses of) all reference fields in objects
483 // allocated in the current generation since the last call to "save_marks".
484 // If more objects are allocated in this generation as a result of applying
485 // the closure, iterates over reference fields in those objects as well.
486 // Calls "save_marks" at the end of the iteration.
487 // General signature...
488 virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
489 // ...and specializations for de-virtualization. (The general
490 // implementation of the _nv versions call the virtual version.
491 // Note that the _nv suffix is not really semantically necessary,
492 // but it avoids some not-so-useful warnings on Solaris.)
493 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
494 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
495 oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \
496 }
497 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
498
499 #undef Generation_SINCE_SAVE_MARKS_DECL
500
501 // The "requestor" generation is performing some garbage collection
502 // action for which it would be useful to have scratch space. If
503 // the target is not the requestor, no gc actions will be required
504 // of the target. The requestor promises to allocate no more than
505 // "max_alloc_words" in the target generation (via promotion say,
506 // if the requestor is a young generation and the target is older).
507 // If the target generation can provide any scratch space, it adds
508 // it to "list", leaving "list" pointing to the head of the
509 // augmented list. The default is to offer no space.
510 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
511 size_t max_alloc_words) {}
512
513 // Give each generation an opportunity to clean up for any
514 // contributed scratch.
515 virtual void reset_scratch() {}
516
517 // When an older generation has been collected, and perhaps resized,
518 // this method will be invoked on all younger generations (from older to
519 // younger), allowing them to resize themselves as appropriate.
622 class CardGeneration: public Generation {
623 friend class VMStructs;
624 protected:
625 // This is shared with other generations.
626 GenRemSet* _rs;
627 // This is local to this generation.
628 BlockOffsetSharedArray* _bts;
629
630 // current shrinking effect: this damps shrinking when the heap gets empty.
631 size_t _shrink_factor;
632
633 size_t _min_heap_delta_bytes; // Minimum amount to expand.
634
635 // Some statistics from before gc started.
636 // These are gathered in the gc_prologue (and should_collect)
637 // to control growing/shrinking policy in spite of promotions.
638 size_t _capacity_at_prologue;
639 size_t _used_at_prologue;
640
641 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
642 GenRemSet* remset);
643
644 public:
645
646 // Attempt to expand the generation by "bytes". Expand by at
647 // minimum "expand_bytes". Return true if some amount (not
648 // necessarily the full "bytes") was done.
649 virtual bool expand(size_t bytes, size_t expand_bytes);
650
651 // Shrink generation by the specified number of bytes.
652 virtual void shrink(size_t bytes) = 0;
653
654 virtual void compute_new_size();
655
656 virtual void clear_remembered_set();
657
658 virtual void invalidate_remembered_set();
659
660 virtual void prepare_for_verify();
661
662 // Grow generation with specified size (returns false if unable to grow)
681 // and after last GC.
682
683 // Grow generation with specified size (returns false if unable to grow)
684 virtual bool grow_by(size_t bytes);
685 // Grow generation to reserved size.
686 virtual bool grow_to_reserved();
687 // Shrink generation by the specified number of bytes.
688 void shrink_by(size_t bytes);
689
690 // Allocation failure support: expand (and retry) or shrink the space.
691 virtual bool expand(size_t bytes, size_t expand_bytes);
692 void shrink(size_t bytes);
693
694 // Accessing spaces
695 ContiguousSpace* the_space() const { return _the_space; }
696
697 public:
// Construct a generation backed by the single contiguous "space",
// delegating reserved-space and remembered-set setup to CardGeneration.
698 OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
699 int level, GenRemSet* remset,
700 ContiguousSpace* space) :
701 CardGeneration(rs, initial_byte_size, level, remset),
702 _the_space(space)
703 {}
704
// Containment check. NOTE(review): confirm in the inline definition
// whether this tests the committed region or the full reserved region.
705 inline bool is_in(const void* p) const;
706
707 // Space enquiries
708 size_t capacity() const;
709 size_t used() const;
710 size_t free() const;
711
712 MemRegion used_region() const;
713
714 size_t unsafe_max_alloc_nogc() const;
715 size_t contiguous_available() const;
716
717 // Iteration
718 void object_iterate(ObjectClosure* blk);
719 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
720
721 void younger_refs_iterate(OopsInGenClosure* blk);
722
723 inline CompactibleSpace* first_compaction_space() const;
724
// Allocation entry points; "is_tlab" presumably marks TLAB refill
// requests — confirm against the callers in the shared allocation path.
725 virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
726 virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
727
728 // Accessing marks
729 inline WaterMark top_mark();
730 inline WaterMark bottom_mark();
731
732 #define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
733 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
734 OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
735 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
736
737 void save_marks();
738 void reset_saved_marks();
739 bool no_allocs_since_save_marks();
740
741 inline size_t block_size(const HeapWord* addr) const;
742
743 inline bool block_is_obj(const HeapWord* addr) const;
744
745 virtual void collect(bool full,
746 bool clear_all_soft_refs,
747 size_t size,
748 bool is_tlab);
// Expand the generation (if possible) and retry the "size"-word allocation.
749 HeapWord* expand_and_allocate(size_t size,
750 bool is_tlab,
751 bool parallel = false);
752
753 virtual void prepare_for_verify();
754
755 virtual void gc_epilogue(bool full);
|
97 // committed) for generation.
98 // Used by card marking code. Must not overlap with address ranges of
99 // other generations.
100 MemRegion _reserved;
101
102 // Memory area reserved for generation
103 VirtualSpace _virtual_space;
104
105 // Level in the generation hierarchy.
106 int _level;
107
108 // ("Weak") Reference processing support
109 ReferenceProcessor* _ref_processor;
110
111 // Performance Counters
112 CollectorCounters* _gc_counters;
113
114 // Statistics for garbage collection
115 GCStats* _gc_stats;
116
// Kind tag used to dispatch the templated oop_since_save_marks_iterate
// to the concrete generation — presumably via a switch in the out-of-line
// definition; confirm there.
117 enum {
118 _dispatch_index_generation_cms,
119 _dispatch_index_generation_def_new,
120 _dispatch_index_generation_one_contig
121 };
// Set once at construction time to one of the indices above.
122 const jbyte _dispatch_index;
123
124 // Returns the next generation in the configuration, or else NULL if this
125 // is the highest generation.
126 Generation* next_gen() const;
127
128 // Initialize the generation.
129 Generation(ReservedSpace rs, size_t initial_byte_size, int level, jbyte dispatch_index);
130
131 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
132 // "sp" that point into younger generations.
133 // The iteration is only over objects allocated at the start of the
134 // iteration; objects allocated as a result of applying the closure are
135 // not included.
136 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
137
138 public:
139 // The set of possible generation kinds.
140 enum Name {
141 DefNew,
142 ParNew,
143 MarkSweepCompact,
144 ConcurrentMarkSweep,
145 Other
146 };
147
148 enum SomePublicConstants {
149 // Generations are GenGrain-aligned and have sizes that are multiples of
469 // Some generations may require some cleanup actions before allowing
470 // a verification.
471 virtual void prepare_for_verify() {}
472
473 // Accessing "marks".
474
475 // This function gives a generation a chance to note a point between
476 // collections. For example, a contiguous generation might note the
477 // beginning allocation point post-collection, which might allow some later
478 // operations to be optimized.
479 virtual void save_marks() {}
480
481 // This function allows generations to initialize any "saved marks". That
482 // is, should only be called when the generation is empty.
483 virtual void reset_saved_marks() {}
484
485 // This function is "true" iff no allocations have occurred in the
486 // generation since the last call to "save_marks".
487 virtual bool no_allocs_since_save_marks() = 0;
488
489 // Apply "cl->apply" to (the addresses of) all reference fields in objects
490 // allocated in the current generation since the last call to "save_marks".
491 // If more objects are allocated as a result of applying the closure,
492 // iterates over reference fields in those objects as well.
493 template <typename OopClosureType>
494 void oop_since_save_marks_iterate(OopClosureType* cl);
495
// Kind-specific implementation selected via _dispatch_index; "nv" chooses
// the statically bound closure-apply path. NOTE(review): exact dispatch
// mechanics live in the out-of-line definition — confirm there.
496 template <bool nv, typename OopClosureType>
497 void oop_since_save_marks_iterate_disp(OopClosureType* cl);
498
499 // The "requestor" generation is performing some garbage collection
500 // action for which it would be useful to have scratch space. If
501 // the target is not the requestor, no gc actions will be required
502 // of the target. The requestor promises to allocate no more than
503 // "max_alloc_words" in the target generation (via promotion say,
504 // if the requestor is a young generation and the target is older).
505 // If the target generation can provide any scratch space, it adds
506 // it to "list", leaving "list" pointing to the head of the
507 // augmented list. The default is to offer no space.
508 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
509 size_t max_alloc_words) {}
510
511 // Give each generation an opportunity to clean up for any
512 // contributed scratch.
513 virtual void reset_scratch() {}
514
515 // When an older generation has been collected, and perhaps resized,
516 // this method will be invoked on all younger generations (from older to
517 // younger), allowing them to resize themselves as appropriate.
620 class CardGeneration: public Generation {
621 friend class VMStructs;
622 protected:
623 // This is shared with other generations.
624 GenRemSet* _rs;
625 // This is local to this generation.
626 BlockOffsetSharedArray* _bts;
627
628 // current shrinking effect: this damps shrinking when the heap gets empty.
629 size_t _shrink_factor;
630
631 size_t _min_heap_delta_bytes; // Minimum amount to expand.
632
633 // Some statistics from before gc started.
634 // These are gathered in the gc_prologue (and should_collect)
635 // to control growing/shrinking policy in spite of promotions.
636 size_t _capacity_at_prologue;
637 size_t _used_at_prologue;
638
639 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
640 GenRemSet* remset, jbyte dispatch_index);
641
642 public:
643
644 // Attempt to expand the generation by "bytes". Expand by at
645 // minimum "expand_bytes". Return true if some amount (not
646 // necessarily the full "bytes") was done.
647 virtual bool expand(size_t bytes, size_t expand_bytes);
648
649 // Shrink generation by the specified number of bytes.
650 virtual void shrink(size_t bytes) = 0;
651
652 virtual void compute_new_size();
653
654 virtual void clear_remembered_set();
655
656 virtual void invalidate_remembered_set();
657
658 virtual void prepare_for_verify();
659
660 // Grow generation with specified size (returns false if unable to grow)
679 // and after last GC.
680
681 // Grow generation with specified size (returns false if unable to grow)
682 virtual bool grow_by(size_t bytes);
683 // Grow generation to reserved size.
684 virtual bool grow_to_reserved();
685 // Shrink generation by the specified number of bytes.
686 void shrink_by(size_t bytes);
687
688 // Allocation failure support: expand (and retry) or shrink the space.
689 virtual bool expand(size_t bytes, size_t expand_bytes);
690 void shrink(size_t bytes);
691
692 // Accessing spaces
693 ContiguousSpace* the_space() const { return _the_space; }
694
695 public:
// Construct a generation backed by the single contiguous "space"; passes
// the matching dispatch index up to Generation so the since-save-marks
// iteration templates can be routed to this class (see _dispatch_index).
696 OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
697 int level, GenRemSet* remset,
698 ContiguousSpace* space) :
699 CardGeneration(rs, initial_byte_size, level, remset, _dispatch_index_generation_one_contig),
700 _the_space(space)
701 {}
702
// Containment check. NOTE(review): confirm in the inline definition
// whether this tests the committed region or the full reserved region.
703 inline bool is_in(const void* p) const;
704
705 // Space enquiries
706 size_t capacity() const;
707 size_t used() const;
708 size_t free() const;
709
710 MemRegion used_region() const;
711
712 size_t unsafe_max_alloc_nogc() const;
713 size_t contiguous_available() const;
714
715 // Iteration
716 void object_iterate(ObjectClosure* blk);
717 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
718
719 void younger_refs_iterate(OopsInGenClosure* blk);
720
721 inline CompactibleSpace* first_compaction_space() const;
722
// Allocation entry points; "is_tlab" presumably marks TLAB refill
// requests — confirm against the callers in the shared allocation path.
723 virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
724 virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
725
726 // Accessing marks
727 inline WaterMark top_mark();
728 inline WaterMark bottom_mark();
729
// Kind-specific implementation of the since-save-marks iteration;
// reached via Generation::_dispatch_index (see the base class).
730 template <bool nv, typename OopClosureType>
731 void oop_since_save_marks_iterate(OopClosureType* cl);
732
733 void save_marks();
734 void reset_saved_marks();
735 bool no_allocs_since_save_marks();
736
737 inline size_t block_size(const HeapWord* addr) const;
738
739 inline bool block_is_obj(const HeapWord* addr) const;
740
741 virtual void collect(bool full,
742 bool clear_all_soft_refs,
743 size_t size,
744 bool is_tlab);
// Expand the generation (if possible) and retry the "size"-word allocation.
745 HeapWord* expand_and_allocate(size_t size,
746 bool is_tlab,
747 bool parallel = false);
748
749 virtual void prepare_for_verify();
750
751 virtual void gc_epilogue(bool full);
|