38 // It will contain one or more spaces holding the actual objects.
39 //
40 // The Generation class hierarchy:
41 //
42 // Generation - abstract base class
43 // - DefNewGeneration - allocation area (copy collected)
44 // - CardGeneration - abstract class adding offset array behavior
45 // - TenuredGeneration - tenured (old object) space (markSweepCompact)
46 //
47 // The system configuration currently allowed is:
48 //
49 // DefNewGeneration + TenuredGeneration
50 //
51
52 class DefNewGeneration;
53 class GCMemoryManager;
54 class GenerationSpec;
55 class CompactibleSpace;
56 class ContiguousSpace;
57 class CompactPoint;
58 class OopsInGenClosure;
59 class OopClosure;
60 class FastScanClosure;
61 class GenCollectedHeap;
62 class GCStats;
63
64 // A "ScratchBlock" represents a block of memory in one generation usable by
65 // another. It represents "num_words" free words, starting at and including
66 // the address of "this".
67 struct ScratchBlock {
68 ScratchBlock* next;    // Next block in a singly-linked list of scratch blocks.
69 size_t num_words;      // Free words in this block, counted from "this" inclusive
                          // (i.e. includes the two header fields above).
70 HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming
71 // first two fields are word-sized.)
                          // One-element-array idiom for a variable-length tail;
                          // the block is overlaid onto raw heap memory, never
                          // allocated via new, so sizeof(ScratchBlock) is not
                          // the real size.
72 };
73
74 class Generation: public CHeapObj<mtGC> {
75 friend class VMStructs;
76 private:
77 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
78 // used region at some specific point during collection.
85 // Used by card marking code. Must not overlap with address ranges of
86 // other generations.
87 MemRegion _reserved;
88
89 // Memory area reserved for generation
90 VirtualSpace _virtual_space;
91
92 // ("Weak") Reference processing support
93 SpanSubjectToDiscoveryClosure _span_based_discoverer;
94 ReferenceProcessor* _ref_processor;
95
96 // Performance Counters
97 CollectorCounters* _gc_counters;
98
99 // Statistics for garbage collection
100 GCStats* _gc_stats;
101
102 // Initialize the generation.
103 Generation(ReservedSpace rs, size_t initial_byte_size);
104
105 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
106 // "sp" that point into younger generations.
107 // The iteration is only over objects allocated at the start of the
108 // iterations; objects allocated as a result of applying the closure are
109 // not included.
110 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
111
112 public:
113 // The set of possible generation kinds.
114 enum Name {
115 DefNew,
116 MarkSweepCompact,
117 Other
118 };
119
120 enum SomePublicConstants {
121 // Generations are GenGrain-aligned and have sizes that are multiples of
122 // GenGrain.
123 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
124 // (we expect its low byte to be zero - see implementation of post_barrier)
125 LogOfGenGrain = 16 ARM32_ONLY(+1),
126 GenGrain = 1 << LogOfGenGrain
127 };
128
129 // allocate and initialize ("weak") refs processing support
130 virtual void ref_processor_init();
131 void set_ref_processor(ReferenceProcessor* rp) {
431 // this method will be invoked on all younger generations (from older to
432 // younger), allowing them to resize themselves as appropriate.
433 virtual void compute_new_size() = 0;
434
435 // Printing
436 virtual const char* name() const = 0;
437 virtual const char* short_name() const = 0;
438
439 // Reference Processing accessor
440 ReferenceProcessor* const ref_processor() { return _ref_processor; }
441
442 // Iteration.
443
444 // Iterate over all the ref-containing fields of all objects in the
445 // generation, calling "cl.do_oop" on each.
446 virtual void oop_iterate(OopIterateClosure* cl);
447
448 // Iterate over all objects in the generation, calling "cl.do_object" on
449 // each.
450 virtual void object_iterate(ObjectClosure* cl);
451
452 // Apply "cl->do_oop" to (the address of) all and only all the ref fields
453 // in the current generation that contain pointers to objects in younger
454 // generations. Objects allocated since the last "save_marks" call are
455 // excluded.
456 virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;
457
458 // Inform a generation that it no longer contains references to objects
459 // in any younger generation. [e.g. Because younger gens are empty,
460 // clear the card table.]
461 virtual void clear_remembered_set() { }
462
463 // Inform a generation that some of its objects have moved. [e.g. The
464 // generation's spaces were compacted, invalidating the card table.]
465 virtual void invalidate_remembered_set() { }
466
467 // Block abstraction.
468
469 // Returns the address of the start of the "block" that contains the
470 // address "addr". We say "blocks" instead of "object" since some heaps
471 // may not pack objects densely; a chunk may either be an object or a
472 // non-object.
473 virtual HeapWord* block_start(const void* addr) const;
474
475 // Requires "addr" to be the start of a chunk, and returns its size.
476 // "addr + size" is required to be the start of a new chunk, or the end
|
38 // It will contain one or more spaces holding the actual objects.
39 //
40 // The Generation class hierarchy:
41 //
42 // Generation - abstract base class
43 // - DefNewGeneration - allocation area (copy collected)
44 // - CardGeneration - abstract class adding offset array behavior
45 // - TenuredGeneration - tenured (old object) space (markSweepCompact)
46 //
47 // The system configuration currently allowed is:
48 //
49 // DefNewGeneration + TenuredGeneration
50 //
51
52 class DefNewGeneration;
53 class GCMemoryManager;
54 class GenerationSpec;
55 class CompactibleSpace;
56 class ContiguousSpace;
57 class CompactPoint;
58 class OopClosure;
59 class FastScanClosure;
60 class GenCollectedHeap;
61 class GCStats;
62
63 // A "ScratchBlock" represents a block of memory in one generation usable by
64 // another. It represents "num_words" free words, starting at and including
65 // the address of "this".
66 struct ScratchBlock {
67 ScratchBlock* next;    // Next block in a singly-linked list of scratch blocks.
68 size_t num_words;      // Free words in this block, counted from "this" inclusive
                          // (i.e. includes the two header fields above).
69 HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming
70 // first two fields are word-sized.)
                          // One-element-array idiom for a variable-length tail;
                          // the block is overlaid onto raw heap memory, never
                          // allocated via new, so sizeof(ScratchBlock) is not
                          // the real size.
71 };
72
73 class Generation: public CHeapObj<mtGC> {
74 friend class VMStructs;
75 private:
76 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
77 // used region at some specific point during collection.
84 // Used by card marking code. Must not overlap with address ranges of
85 // other generations.
86 MemRegion _reserved;
87
88 // Memory area reserved for generation
89 VirtualSpace _virtual_space;
90
91 // ("Weak") Reference processing support
92 SpanSubjectToDiscoveryClosure _span_based_discoverer;
93 ReferenceProcessor* _ref_processor;
94
95 // Performance Counters
96 CollectorCounters* _gc_counters;
97
98 // Statistics for garbage collection
99 GCStats* _gc_stats;
100
101 // Initialize the generation.
102 Generation(ReservedSpace rs, size_t initial_byte_size);
103
104 public:
105 // The set of possible generation kinds.
106 enum Name {
107 DefNew,
108 MarkSweepCompact,
109 Other
110 };
111
112 enum SomePublicConstants {
113 // Generations are GenGrain-aligned and have sizes that are multiples of
114 // GenGrain.
115 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
116 // (we expect its low byte to be zero - see implementation of post_barrier)
117 LogOfGenGrain = 16 ARM32_ONLY(+1),
118 GenGrain = 1 << LogOfGenGrain
119 };
120
121 // allocate and initialize ("weak") refs processing support
122 virtual void ref_processor_init();
123 void set_ref_processor(ReferenceProcessor* rp) {
423 // this method will be invoked on all younger generations (from older to
424 // younger), allowing them to resize themselves as appropriate.
425 virtual void compute_new_size() = 0;
426
427 // Printing
428 virtual const char* name() const = 0;
429 virtual const char* short_name() const = 0;
430
431 // Reference Processing accessor
432 ReferenceProcessor* const ref_processor() { return _ref_processor; }
433
434 // Iteration.
435
436 // Iterate over all the ref-containing fields of all objects in the
437 // generation, calling "cl.do_oop" on each.
438 virtual void oop_iterate(OopIterateClosure* cl);
439
440 // Iterate over all objects in the generation, calling "cl.do_object" on
441 // each.
442 virtual void object_iterate(ObjectClosure* cl);
443
444 // Inform a generation that it no longer contains references to objects
445 // in any younger generation. [e.g. Because younger gens are empty,
446 // clear the card table.]
447 virtual void clear_remembered_set() { }
448
449 // Inform a generation that some of its objects have moved. [e.g. The
450 // generation's spaces were compacted, invalidating the card table.]
451 virtual void invalidate_remembered_set() { }
452
453 // Block abstraction.
454
455 // Returns the address of the start of the "block" that contains the
456 // address "addr". We say "blocks" instead of "object" since some heaps
457 // may not pack objects densely; a chunk may either be an object or a
458 // non-object.
459 virtual HeapWord* block_start(const void* addr) const;
460
461 // Requires "addr" to be the start of a chunk, and returns its size.
462 // "addr + size" is required to be the start of a new chunk, or the end
|