67 class ContiguousSpace;
68 class CompactPoint;
69 class OopsInGenClosure;
70 class OopClosure;
71 class ScanClosure;
72 class FastScanClosure;
73 class GenCollectedHeap;
74 class GenRemSet;
75 class GCStats;
76
77 // A "ScratchBlock" represents a block of memory in one generation usable by
78 // another. It represents "num_words" free words, starting at and including
79 // the address of "this".
struct ScratchBlock {
  ScratchBlock* next;        // Next block in the provider's free list.
  size_t num_words;          // Total size of this block in heap words,
                             // including the two header fields above.
  HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming
                             // first two fields are word-sized.)
};
86
87
88 class Generation: public CHeapObj<mtGC> {
89 friend class VMStructs;
90 private:
91 jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
92 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
93 // used region at some specific point during collection.
94
95 protected:
96 // Minimum and maximum addresses for memory reserved (not necessarily
97 // committed) for generation.
98 // Used by card marking code. Must not overlap with address ranges of
99 // other generations.
100 MemRegion _reserved;
101
102 // Memory area reserved for generation
103 VirtualSpace _virtual_space;
104
105 // Level in the generation hierarchy.
106 int _level;
107
108 // ("Weak") Reference processing support
109 ReferenceProcessor* _ref_processor;
110
111 // Performance Counters
112 CollectorCounters* _gc_counters;
113
114 // Statistics for garbage collection
115 GCStats* _gc_stats;
116
117 // Returns the next generation in the configuration, or else NULL if this
118 // is the highest generation.
119 Generation* next_gen() const;
120
121 // Initialize the generation.
122 Generation(ReservedSpace rs, size_t initial_byte_size, int level);
123
124 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
125 // "sp" that point into younger generations.
126 // The iteration is only over objects allocated at the start of the
127 // iterations; objects allocated as a result of applying the closure are
128 // not included.
129 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
130
131 public:
132 // The set of possible generation kinds.
133 enum Name {
134 DefNew,
135 ParNew,
136 MarkSweepCompact,
137 ConcurrentMarkSweep,
138 Other
139 };
140
141 enum SomePublicConstants {
142 // Generations are GenGrain-aligned and have size that are multiples of
143 // GenGrain.
144 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
145 // (we expect its low byte to be zero - see implementation of post_barrier)
146 LogOfGenGrain = 16 ARM_ONLY(+1),
147 GenGrain = 1 << LogOfGenGrain
148 };
149
150 // allocate and initialize ("weak") refs processing support
151 virtual void ref_processor_init();
  // Install the generation's reference processor. May be called only once
  // (asserts that no processor has been installed yet); intended to be
  // invoked from ref_processor_init().
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }
156
157 virtual Generation::Name kind() { return Generation::Other; }
158 GenerationSpec* spec();
159
160 // This properly belongs in the collector, but for now this
421 NOT_PRODUCT(
422 if (now < _time_of_last_gc) {
423 warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, (int64_t)_time_of_last_gc, (int64_t)now);
424 }
425 )
426 return _time_of_last_gc;
427 }
428
  // Record "now" (milliseconds) as the time at which the most recent GC
  // of this generation occurred.
  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }
432
  // Generations may keep statistics about collection. This
  // method updates those statistics. current_level is
  // the level of the collection that has most recently
  // occurred. This allows the generation to decide what
  // statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  // Default implementation records nothing; subclasses override as needed.
  virtual void update_gc_stats(int current_level, bool full) {}
442
443 // Mark sweep support phase2
444 virtual void prepare_for_compaction(CompactPoint* cp);
445 // Mark sweep support phase3
446 virtual void adjust_pointers();
447 // Mark sweep support phase4
448 virtual void compact();
449 virtual void post_compact() {ShouldNotReachHere();}
450
451 // Support for CMS's rescan. In this general form we return a pointer
452 // to an abstract object that can be used, based on specific previously
453 // decided protocols, to exchange information between generations,
454 // information that may be useful for speeding up certain types of
455 // garbage collectors. A NULL value indicates to the client that
456 // no data recording is expected by the provider. The data-recorder is
457 // expected to be GC worker thread-local, with the worker index
458 // indicated by "thr_num".
  // Default: this generation provides no per-worker data recorder.
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  // No-op by default; overridden by generations that sample eden chunks
  // (presumably for CMS rescan support per the comment above — confirm).
  virtual void sample_eden_chunk() {}
461
506 // if the requestor is a young generation and the target is older).
507 // If the target generation can provide any scratch space, it adds
508 // it to "list", leaving "list" pointing to the head of the
509 // augmented list. The default is to offer no space.
  // Default implementation: this generation offers no scratch space, so
  // "list" is left unchanged.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
512
513 // Give each generation an opportunity to do clean up for any
514 // contributed scratch.
515 virtual void reset_scratch() {};
516
517 // When an older generation has been collected, and perhaps resized,
518 // this method will be invoked on all younger generations (from older to
519 // younger), allowing them to resize themselves as appropriate.
520 virtual void compute_new_size() = 0;
521
522 // Printing
523 virtual const char* name() const = 0;
524 virtual const char* short_name() const = 0;
525
526 int level() const { return _level; }
527
528 // Attributes
529
530 // True iff the given generation may only be the youngest generation.
531 virtual bool must_be_youngest() const = 0;
532 // True iff the given generation may only be the oldest generation.
533 virtual bool must_be_oldest() const = 0;
534
535 // Reference Processing accessor
536 ReferenceProcessor* const ref_processor() { return _ref_processor; }
537
538 // Iteration.
539
540 // Iterate over all the ref-containing fields of all objects in the
541 // generation, calling "cl.do_oop" on each.
542 virtual void oop_iterate(ExtendedOopClosure* cl);
543
544 // Iterate over all objects in the generation, calling "cl.do_object" on
545 // each.
546 virtual void object_iterate(ObjectClosure* cl);
547
621
622 class CardGeneration: public Generation {
623 friend class VMStructs;
624 protected:
625 // This is shared with other generations.
626 GenRemSet* _rs;
627 // This is local to this generation.
628 BlockOffsetSharedArray* _bts;
629
630 // current shrinking effect: this damps shrinking when the heap gets empty.
631 size_t _shrink_factor;
632
633 size_t _min_heap_delta_bytes; // Minimum amount to expand.
634
635 // Some statistics from before gc started.
636 // These are gathered in the gc_prologue (and should_collect)
637 // to control growing/shrinking policy in spite of promotions.
638 size_t _capacity_at_prologue;
639 size_t _used_at_prologue;
640
641 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
642 GenRemSet* remset);
643
644 public:
645
646 // Attempt to expand the generation by "bytes". Expand by at a
647 // minimum "expand_bytes". Return true if some amount (not
648 // necessarily the full "bytes") was done.
649 virtual bool expand(size_t bytes, size_t expand_bytes);
650
651 // Shrink generation with specified size (returns false if unable to shrink)
652 virtual void shrink(size_t bytes) = 0;
653
654 virtual void compute_new_size();
655
656 virtual void clear_remembered_set();
657
658 virtual void invalidate_remembered_set();
659
660 virtual void prepare_for_verify();
661
662 // Grow generation with specified size (returns false if unable to grow)
679 ContiguousSpace* _the_space; // actual space holding objects
680 WaterMark _last_gc; // watermark between objects allocated before
681 // and after last GC.
682
683 // Grow generation with specified size (returns false if unable to grow)
684 virtual bool grow_by(size_t bytes);
685 // Grow generation to reserved size.
686 virtual bool grow_to_reserved();
687 // Shrink generation with specified size (returns false if unable to shrink)
688 void shrink_by(size_t bytes);
689
690 // Allocation failure
691 virtual bool expand(size_t bytes, size_t expand_bytes);
692 void shrink(size_t bytes);
693
694 // Accessing spaces
695 ContiguousSpace* the_space() const { return _the_space; }
696
697 public:
  // Construct a one-contiguous-space card generation over reserved space
  // "rs", committing "initial_byte_size" bytes initially, at hierarchy
  // level "level". "remset" is the shared remembered set; "space" is the
  // externally created contiguous space that will hold the objects.
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space)
  {}
704
705 inline bool is_in(const void* p) const;
706
707 // Space enquiries
708 size_t capacity() const;
709 size_t used() const;
710 size_t free() const;
711
712 MemRegion used_region() const;
713
714 size_t unsafe_max_alloc_nogc() const;
715 size_t contiguous_available() const;
716
717 // Iteration
718 void object_iterate(ObjectClosure* blk);
719 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
720
721 void younger_refs_iterate(OopsInGenClosure* blk);
|
67 class ContiguousSpace;
68 class CompactPoint;
69 class OopsInGenClosure;
70 class OopClosure;
71 class ScanClosure;
72 class FastScanClosure;
73 class GenCollectedHeap;
74 class GenRemSet;
75 class GCStats;
76
77 // A "ScratchBlock" represents a block of memory in one generation usable by
78 // another. It represents "num_words" free words, starting at and including
79 // the address of "this".
struct ScratchBlock {
  ScratchBlock* next;        // Next block in the provider's free list.
  size_t num_words;          // Total size of this block in heap words,
                             // including the two header fields above.
  HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming
                             // first two fields are word-sized.)
};
86
87 class Generation: public CHeapObj<mtGC> {
88 friend class VMStructs;
89 private:
90 jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
91 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
92 // used region at some specific point during collection.
93
94 protected:
95 // Minimum and maximum addresses for memory reserved (not necessarily
96 // committed) for generation.
97 // Used by card marking code. Must not overlap with address ranges of
98 // other generations.
99 MemRegion _reserved;
100
101 // Memory area reserved for generation
102 VirtualSpace _virtual_space;
103
104 // ("Weak") Reference processing support
105 ReferenceProcessor* _ref_processor;
106
107 // Performance Counters
108 CollectorCounters* _gc_counters;
109
110 // Statistics for garbage collection
111 GCStats* _gc_stats;
112
113 // Initialize the generation.
114 Generation(ReservedSpace rs, size_t initial_byte_size);
115
116 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
117 // "sp" that point into younger generations.
118 // The iteration is only over objects allocated at the start of the
119 // iterations; objects allocated as a result of applying the closure are
120 // not included.
121 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
122
123 public:
124 // The set of possible generation kinds.
125 enum Name {
126 DefNew,
127 ParNew,
128 MarkSweepCompact,
129 ConcurrentMarkSweep,
130 Other
131 };
132
133 enum Type {
134 Young,
135 Old
136 };
137
138 enum SomePublicConstants {
139 // Generations are GenGrain-aligned and have size that are multiples of
140 // GenGrain.
141 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
142 // (we expect its low byte to be zero - see implementation of post_barrier)
143 LogOfGenGrain = 16 ARM_ONLY(+1),
144 GenGrain = 1 << LogOfGenGrain
145 };
146
147 // allocate and initialize ("weak") refs processing support
148 virtual void ref_processor_init();
  // Install the generation's reference processor. May be called only once
  // (asserts that no processor has been installed yet); intended to be
  // invoked from ref_processor_init().
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }
153
154 virtual Generation::Name kind() { return Generation::Other; }
155 GenerationSpec* spec();
156
157 // This properly belongs in the collector, but for now this
418 NOT_PRODUCT(
419 if (now < _time_of_last_gc) {
420 warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, (int64_t)_time_of_last_gc, (int64_t)now);
421 }
422 )
423 return _time_of_last_gc;
424 }
425
  // Record "now" (milliseconds) as the time at which the most recent GC
  // of this generation occurred.
  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }
429
  // Generations may keep statistics about collection. This
  // method updates those statistics. "current_generation" is
  // the generation that has most recently been collected.
  // This allows the generation to decide what statistics are
  // valid to collect. For example, the generation can decide
  // to gather the amount of promoted data if the collection
  // of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  // Default implementation records nothing; subclasses override as needed.
  virtual void update_gc_stats(Generation* current_generation, bool full) {}
439
440 // Mark sweep support phase2
441 virtual void prepare_for_compaction(CompactPoint* cp);
442 // Mark sweep support phase3
443 virtual void adjust_pointers();
444 // Mark sweep support phase4
445 virtual void compact();
446 virtual void post_compact() {ShouldNotReachHere();}
447
448 // Support for CMS's rescan. In this general form we return a pointer
449 // to an abstract object that can be used, based on specific previously
450 // decided protocols, to exchange information between generations,
451 // information that may be useful for speeding up certain types of
452 // garbage collectors. A NULL value indicates to the client that
453 // no data recording is expected by the provider. The data-recorder is
454 // expected to be GC worker thread-local, with the worker index
455 // indicated by "thr_num".
  // Default: this generation provides no per-worker data recorder.
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  // No-op by default; overridden by generations that sample eden chunks
  // (presumably for CMS rescan support per the comment above — confirm).
  virtual void sample_eden_chunk() {}
458
503 // if the requestor is a young generation and the target is older).
504 // If the target generation can provide any scratch space, it adds
505 // it to "list", leaving "list" pointing to the head of the
506 // augmented list. The default is to offer no space.
  // Default implementation: this generation offers no scratch space, so
  // "list" is left unchanged.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
509
510 // Give each generation an opportunity to do clean up for any
511 // contributed scratch.
512 virtual void reset_scratch() {};
513
514 // When an older generation has been collected, and perhaps resized,
515 // this method will be invoked on all younger generations (from older to
516 // younger), allowing them to resize themselves as appropriate.
517 virtual void compute_new_size() = 0;
518
519 // Printing
520 virtual const char* name() const = 0;
521 virtual const char* short_name() const = 0;
522
523 // Attributes
524
525 // True iff the given generation may only be the youngest generation.
526 virtual bool must_be_youngest() const = 0;
527 // True iff the given generation may only be the oldest generation.
528 virtual bool must_be_oldest() const = 0;
529
530 // Reference Processing accessor
531 ReferenceProcessor* const ref_processor() { return _ref_processor; }
532
533 // Iteration.
534
535 // Iterate over all the ref-containing fields of all objects in the
536 // generation, calling "cl.do_oop" on each.
537 virtual void oop_iterate(ExtendedOopClosure* cl);
538
539 // Iterate over all objects in the generation, calling "cl.do_object" on
540 // each.
541 virtual void object_iterate(ObjectClosure* cl);
542
616
617 class CardGeneration: public Generation {
618 friend class VMStructs;
619 protected:
620 // This is shared with other generations.
621 GenRemSet* _rs;
622 // This is local to this generation.
623 BlockOffsetSharedArray* _bts;
624
625 // current shrinking effect: this damps shrinking when the heap gets empty.
626 size_t _shrink_factor;
627
628 size_t _min_heap_delta_bytes; // Minimum amount to expand.
629
630 // Some statistics from before gc started.
631 // These are gathered in the gc_prologue (and should_collect)
632 // to control growing/shrinking policy in spite of promotions.
633 size_t _capacity_at_prologue;
634 size_t _used_at_prologue;
635
636 CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
637
638 public:
639
640 // Attempt to expand the generation by "bytes". Expand by at a
641 // minimum "expand_bytes". Return true if some amount (not
642 // necessarily the full "bytes") was done.
643 virtual bool expand(size_t bytes, size_t expand_bytes);
644
645 // Shrink generation with specified size (returns false if unable to shrink)
646 virtual void shrink(size_t bytes) = 0;
647
648 virtual void compute_new_size();
649
650 virtual void clear_remembered_set();
651
652 virtual void invalidate_remembered_set();
653
654 virtual void prepare_for_verify();
655
656 // Grow generation with specified size (returns false if unable to grow)
673 ContiguousSpace* _the_space; // actual space holding objects
674 WaterMark _last_gc; // watermark between objects allocated before
675 // and after last GC.
676
677 // Grow generation with specified size (returns false if unable to grow)
678 virtual bool grow_by(size_t bytes);
679 // Grow generation to reserved size.
680 virtual bool grow_to_reserved();
681 // Shrink generation with specified size (returns false if unable to shrink)
682 void shrink_by(size_t bytes);
683
684 // Allocation failure
685 virtual bool expand(size_t bytes, size_t expand_bytes);
686 void shrink(size_t bytes);
687
688 // Accessing spaces
689 ContiguousSpace* the_space() const { return _the_space; }
690
691 public:
  // Construct a one-contiguous-space card generation over reserved space
  // "rs", committing "initial_byte_size" bytes initially. "remset" is the
  // shared remembered set; "space" is the externally created contiguous
  // space that will hold the objects.
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               GenRemSet* remset, ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, remset),
    _the_space(space)
  {}
697
698 inline bool is_in(const void* p) const;
699
700 // Space enquiries
701 size_t capacity() const;
702 size_t used() const;
703 size_t free() const;
704
705 MemRegion used_region() const;
706
707 size_t unsafe_max_alloc_nogc() const;
708 size_t contiguous_available() const;
709
710 // Iteration
711 void object_iterate(ObjectClosure* blk);
712 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
713
714 void younger_refs_iterate(OopsInGenClosure* blk);
|