81 };
82
83
84 class Generation: public CHeapObj<mtGC> {
85 friend class VMStructs;
86 private:
87 jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
88 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
89 // used region at some specific point during collection.
90
91 protected:
92 // Minimum and maximum addresses for memory reserved (not necessarily
93 // committed) for generation.
94 // Used by card marking code. Must not overlap with address ranges of
95 // other generations.
96 MemRegion _reserved;
97
98 // Memory area reserved for generation
99 VirtualSpace _virtual_space;
100
101 // Level in the generation hierarchy.
102 int _level;
103
104 // ("Weak") Reference processing support
105 ReferenceProcessor* _ref_processor;
106
107 // Performance Counters
108 CollectorCounters* _gc_counters;
109
110 // Statistics for garbage collection
111 GCStats* _gc_stats;
112
113 // Returns the next generation in the configuration, or else NULL if this
114 // is the highest generation.
115 Generation* next_gen() const;
116
117 // Initialize the generation.
118 Generation(ReservedSpace rs, size_t initial_byte_size, int level);
119
120 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
121 // "sp" that point into younger generations.
122 // The iteration is only over objects allocated at the start of the
123 // iteration; objects allocated as a result of applying the closure are
124 // not included.
125 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
126
127 public:
128 // The set of possible generation kinds.
129 enum Name {
130 DefNew,
131 ParNew,
132 MarkSweepCompact,
133 ConcurrentMarkSweep,
134 Other
135 };
136
137 enum SomePublicConstants {
138 // Generations are GenGrain-aligned and have sizes that are multiples of
139 // GenGrain.
140 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
141 // (we expect its low byte to be zero - see implementation of post_barrier)
142 LogOfGenGrain = 16 ARM32_ONLY(+1),
143 GenGrain = 1 << LogOfGenGrain
144 };
145
146 // allocate and initialize ("weak") refs processing support
147 virtual void ref_processor_init();
// Install the reference processor for this generation. May only be
// called while no processor is set (asserted) -- this is a one-shot
// initializer, not a re-setter.
148 void set_ref_processor(ReferenceProcessor* rp) {
149 assert(_ref_processor == NULL, "clobbering existing _ref_processor");
150 _ref_processor = rp;
151 }
152
// The concrete kind of this generation; the base class reports Other,
// concrete subclasses override to identify themselves.
153 virtual Generation::Name kind() { return Generation::Other; }
154 GenerationSpec* spec();
155
156 // This properly belongs in the collector, but for now this
392
393 // Time (in ms) when we were last collected or now if a collection is
394 // in progress.
// Note: "now" is used only for the debug-build monotonicity check
// below; the returned value is always the recorded _time_of_last_gc.
395 virtual jlong time_of_last_gc(jlong now) {
396 // Both _time_of_last_gc and now are set using a time source
397 // that guarantees monotonically non-decreasing values provided
398 // the underlying platform provides such a source. So we still
399 // have to guard against non-monotonicity.
400 NOT_PRODUCT(
401 if (now < _time_of_last_gc) {
402 warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
403 }
404 )
405 return _time_of_last_gc;
406 }
407
// Record "now" (ms) as the time of the last gc for this generation.
408 virtual void update_time_of_last_gc(jlong now) {
409 _time_of_last_gc = now;
410 }
411
412 // Generations may keep statistics about collection. This
413 // method updates those statistics. current_level is
414 // the level of the collection that has most recently
415 // occurred. This allows the generation to decide what
416 // statistics are valid to collect. For example, the
417 // generation can decide to gather the amount of promoted data
418 // if the collection of the younger generations has completed.
// Accessor for the statistics this generation keeps about collections.
419 GCStats* gc_stats() const { return _gc_stats; }
// Default: no statistics are updated; subclasses that gather data such
// as the amount promoted (see comment above) override this.
420 virtual void update_gc_stats(int current_level, bool full) {}
421
422 // Mark sweep support phase2
423 virtual void prepare_for_compaction(CompactPoint* cp);
424 // Mark sweep support phase3
425 virtual void adjust_pointers();
426 // Mark sweep support phase4
427 virtual void compact();
// Post-compaction hook; the default must never be called -- a
// generation that participates in this phase has to override it.
428 virtual void post_compact() {ShouldNotReachHere();}
429
430 // Support for CMS's rescan. In this general form we return a pointer
431 // to an abstract object that can be used, based on specific previously
432 // decided protocols, to exchange information between generations,
433 // information that may be useful for speeding up certain types of
434 // garbage collectors. A NULL value indicates to the client that
435 // no data recording is expected by the provider. The data-recorder is
436 // expected to be GC worker thread-local, with the worker index
437 // indicated by "thr_num".
// Default: no per-worker data recorder is provided (see contract above).
438 virtual void* get_data_recorder(int thr_num) { return NULL; }
// No-op by default; presumably part of the CMS rescan support
// documented above -- confirm against the CMS overrides.
439 virtual void sample_eden_chunk() {}
440
485 // if the requestor is a young generation and the target is older).
486 // If the target generation can provide any scratch space, it adds
487 // it to "list", leaving "list" pointing to the head of the
488 // augmented list. The default is to offer no space.
// Default: contribute no scratch space (see comment above).
489 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
490 size_t max_alloc_words) {}
491
492 // Give each generation an opportunity to do clean up for any
493 // contributed scratch.
// Default: no scratch was contributed, so there is nothing to clean up.
// (The stray ';' after the empty body declared nothing and is removed;
// it triggers -Wextra-semi.)
494 virtual void reset_scratch() {}
495
496 // When an older generation has been collected, and perhaps resized,
497 // this method will be invoked on all younger generations (from older to
498 // younger), allowing them to resize themselves as appropriate.
499 virtual void compute_new_size() = 0;
500
501 // Printing
502 virtual const char* name() const = 0;
503 virtual const char* short_name() const = 0;
504
// This generation's level in the generation hierarchy (see _level).
505 int level() const { return _level; }
506
507 // Reference Processing accessor
// Returns the ("weak") reference processor; NULL until
// set_ref_processor() has been called. The former top-level 'const'
// on the returned pointer value was ignored by the language
// (-Wignored-qualifiers) and is dropped.
508 ReferenceProcessor* ref_processor() { return _ref_processor; }
509
510 // Iteration.
511
512 // Iterate over all the ref-containing fields of all objects in the
513 // generation, calling "cl.do_oop" on each.
514 virtual void oop_iterate(ExtendedOopClosure* cl);
515
516 // Iterate over all objects in the generation, calling "cl.do_object" on
517 // each.
518 virtual void object_iterate(ObjectClosure* cl);
519
520 // Iterate over all safe objects in the generation, calling "cl.do_object" on
521 // each. An object is safe if its references point to other objects in
522 // the heap. This defaults to object_iterate() unless overridden.
523 virtual void safe_object_iterate(ObjectClosure* cl);
524
525 // Apply "cl->do_oop" to (the address of) all and only all the ref fields
526 // in the current generation that contain pointers to objects in younger
|
81 };
82
83
84 class Generation: public CHeapObj<mtGC> {
85 friend class VMStructs;
86 private:
87 jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
88 MemRegion _prev_used_region; // for collectors that want to "remember" a value for
89 // used region at some specific point during collection.
90
91 protected:
92 // Minimum and maximum addresses for memory reserved (not necessarily
93 // committed) for generation.
94 // Used by card marking code. Must not overlap with address ranges of
95 // other generations.
96 MemRegion _reserved;
97
98 // Memory area reserved for generation
99 VirtualSpace _virtual_space;
100
101 // ("Weak") Reference processing support
102 ReferenceProcessor* _ref_processor;
103
104 // Performance Counters
105 CollectorCounters* _gc_counters;
106
107 // Statistics for garbage collection
108 GCStats* _gc_stats;
109
110 // Initialize the generation.
111 Generation(ReservedSpace rs, size_t initial_byte_size);
112
113 // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
114 // "sp" that point into younger generations.
115 // The iteration is only over objects allocated at the start of the
116 // iteration; objects allocated as a result of applying the closure are
117 // not included.
118 void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
119
120 public:
121 // The set of possible generation kinds.
122 enum Name {
123 DefNew,
124 ParNew,
125 MarkSweepCompact,
126 ConcurrentMarkSweep,
127 Other
128 };
129
130 enum Type {
131 Young,
132 Old
133 };
134
135 enum SomePublicConstants {
136 // Generations are GenGrain-aligned and have sizes that are multiples of
137 // GenGrain.
138 // Note: on ARM we add 1 bit for card_table_base to be properly aligned
139 // (we expect its low byte to be zero - see implementation of post_barrier)
140 LogOfGenGrain = 16 ARM32_ONLY(+1),
141 GenGrain = 1 << LogOfGenGrain
142 };
143
144 // allocate and initialize ("weak") refs processing support
145 virtual void ref_processor_init();
// Install the reference processor for this generation. May only be
// called while no processor is set (asserted) -- this is a one-shot
// initializer, not a re-setter.
146 void set_ref_processor(ReferenceProcessor* rp) {
147 assert(_ref_processor == NULL, "clobbering existing _ref_processor");
148 _ref_processor = rp;
149 }
150
// The concrete kind of this generation; the base class reports Other,
// concrete subclasses override to identify themselves.
151 virtual Generation::Name kind() { return Generation::Other; }
152 GenerationSpec* spec();
153
154 // This properly belongs in the collector, but for now this
390
391 // Time (in ms) when we were last collected or now if a collection is
392 // in progress.
// Note: "now" is used only for the debug-build monotonicity check
// below; the returned value is always the recorded _time_of_last_gc.
393 virtual jlong time_of_last_gc(jlong now) {
394 // Both _time_of_last_gc and now are set using a time source
395 // that guarantees monotonically non-decreasing values provided
396 // the underlying platform provides such a source. So we still
397 // have to guard against non-monotonicity.
398 NOT_PRODUCT(
399 if (now < _time_of_last_gc) {
400 warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
401 }
402 )
403 return _time_of_last_gc;
404 }
405
// Record "now" (ms) as the time of the last gc for this generation.
406 virtual void update_time_of_last_gc(jlong now) {
407 _time_of_last_gc = now;
408 }
409
410 // Generations may keep statistics about collection. This method
411 // updates those statistics. current_generation is the generation
412 // that was most recently collected. This allows the generation to
413 // decide what statistics are valid to collect. For example, the
414 // generation can decide to gather the amount of promoted data if
415 // the collection of the younger generations has completed.
// Accessor for the statistics this generation keeps about collections.
416 GCStats* gc_stats() const { return _gc_stats; }
// Default: no statistics are updated; subclasses that gather data such
// as the amount promoted (see comment above) override this.
417 virtual void update_gc_stats(Generation* current_generation, bool full) {}
418
419 // Mark sweep support phase2
420 virtual void prepare_for_compaction(CompactPoint* cp);
421 // Mark sweep support phase3
422 virtual void adjust_pointers();
423 // Mark sweep support phase4
424 virtual void compact();
// Post-compaction hook; the default must never be called -- a
// generation that participates in this phase has to override it.
425 virtual void post_compact() {ShouldNotReachHere();}
426
427 // Support for CMS's rescan. In this general form we return a pointer
428 // to an abstract object that can be used, based on specific previously
429 // decided protocols, to exchange information between generations,
430 // information that may be useful for speeding up certain types of
431 // garbage collectors. A NULL value indicates to the client that
432 // no data recording is expected by the provider. The data-recorder is
433 // expected to be GC worker thread-local, with the worker index
434 // indicated by "thr_num".
// Default: no per-worker data recorder is provided (see contract above).
435 virtual void* get_data_recorder(int thr_num) { return NULL; }
// No-op by default; presumably part of the CMS rescan support
// documented above -- confirm against the CMS overrides.
436 virtual void sample_eden_chunk() {}
437
482 // if the requestor is a young generation and the target is older).
483 // If the target generation can provide any scratch space, it adds
484 // it to "list", leaving "list" pointing to the head of the
485 // augmented list. The default is to offer no space.
// Default: contribute no scratch space (see comment above).
486 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
487 size_t max_alloc_words) {}
488
489 // Give each generation an opportunity to do clean up for any
490 // contributed scratch.
// Default: no scratch was contributed, so there is nothing to clean up.
// (The stray ';' after the empty body declared nothing and is removed;
// it triggers -Wextra-semi.)
491 virtual void reset_scratch() {}
492
493 // When an older generation has been collected, and perhaps resized,
494 // this method will be invoked on all younger generations (from older to
495 // younger), allowing them to resize themselves as appropriate.
496 virtual void compute_new_size() = 0;
497
498 // Printing
499 virtual const char* name() const = 0;
500 virtual const char* short_name() const = 0;
501
502 // Reference Processing accessor
// Returns the ("weak") reference processor; NULL until
// set_ref_processor() has been called. The former top-level 'const'
// on the returned pointer value was ignored by the language
// (-Wignored-qualifiers) and is dropped.
503 ReferenceProcessor* ref_processor() { return _ref_processor; }
504
505 // Iteration.
506
507 // Iterate over all the ref-containing fields of all objects in the
508 // generation, calling "cl.do_oop" on each.
509 virtual void oop_iterate(ExtendedOopClosure* cl);
510
511 // Iterate over all objects in the generation, calling "cl.do_object" on
512 // each.
513 virtual void object_iterate(ObjectClosure* cl);
514
515 // Iterate over all safe objects in the generation, calling "cl.do_object" on
516 // each. An object is safe if its references point to other objects in
517 // the heap. This defaults to object_iterate() unless overridden.
518 virtual void safe_object_iterate(ObjectClosure* cl);
519
520 // Apply "cl->do_oop" to (the address of) all and only all the ref fields
521 // in the current generation that contain pointers to objects in younger
|