< prev index next >

src/share/vm/gc/shared/generation.hpp

Print this page




 105   ReferenceProcessor* _ref_processor;
 106 
 107   // Performance Counters
 108   CollectorCounters* _gc_counters;
 109 
 110   // Statistics for garbage collection
 111   GCStats* _gc_stats;
 112 
 113   // Returns the next generation in the configuration, or else NULL if this
 114   // is the highest generation.
 115   Generation* next_gen() const;
 116 
 117   // Initialize the generation.
 118   Generation(ReservedSpace rs, size_t initial_byte_size, int level);
 119 
 120   // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
 121   // "sp" that point into younger generations.
 122   // The iteration is only over objects allocated at the start of the
 123   // iteration; objects allocated as a result of applying the closure are
 124   // not included.
 125   void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
 126 
 127  public:
 128   // The set of possible generation kinds (Other covers kinds not listed here).
 129   enum Name {
 130     DefNew,
 131     ParNew,
 132     MarkSweepCompact,
 133     ConcurrentMarkSweep,
 134     Other
 135   };
 136 
 137   enum SomePublicConstants {
 138     // Generations are GenGrain-aligned and have sizes that are multiples of
 139     // GenGrain.
 140     // Note: on ARM we add 1 bit for card_table_base to be properly aligned
 141     // (we expect its low byte to be zero - see implementation of post_barrier)
 142     LogOfGenGrain = 16 ARM32_ONLY(+1),
 143     GenGrain = 1 << LogOfGenGrain
 144   };
 145 


 509 
 510   // Iteration.
 511 
 512   // Iterate over all the ref-containing fields of all objects in the
 513   // generation, calling "cl.do_oop" on each.
 514   virtual void oop_iterate(ExtendedOopClosure* cl);
 515 
 516   // Iterate over all objects in the generation, calling "cl.do_object" on
 517   // each.
 518   virtual void object_iterate(ObjectClosure* cl);
 519 
 520   // Iterate over all safe objects in the generation, calling "cl.do_object" on
 521   // each.  An object is safe if its references point to other objects in
 522   // the heap.  This defaults to object_iterate() unless overridden.
 523   virtual void safe_object_iterate(ObjectClosure* cl);
 524 
 525   // Apply "cl->do_oop" to (the address of) all and only all the ref fields
 526   // in the current generation that contain pointers to objects in younger
 527   // generations. Objects allocated since the last "save_marks" call are
 528   // excluded.
 529   virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
 530 
 531   // Inform a generation that it no longer contains references to objects
 532   // in any younger generation.    [e.g. Because younger gens are empty,
 533   // clear the card table.]  The default implementation is a no-op.
 534   virtual void clear_remembered_set() { }
 535 
 536   // Inform a generation that some of its objects have moved.  [e.g. The
 537   // generation's spaces were compacted, invalidating the card table.]
 538   // The default implementation is a no-op.
 539   virtual void invalidate_remembered_set() { }
 539 
 540   // Block abstraction.
 541 
 542   // Returns the address of the start of the "block" that contains the
 543   // address "addr".  We say "blocks" instead of "object" since some heaps
 544   // may not pack objects densely; a chunk may either be an object or a
 545   // non-object.
 546   virtual HeapWord* block_start(const void* addr) const;
 547 
 548   // Requires "addr" to be the start of a chunk, and returns its size.
 549   // "addr + size" is required to be the start of a new chunk, or the end




 105   ReferenceProcessor* _ref_processor;
 106 
 107   // Performance Counters
 108   CollectorCounters* _gc_counters;
 109 
 110   // Statistics for garbage collection
 111   GCStats* _gc_stats;
 112 
 113   // Returns the next generation in the configuration, or else NULL if this
 114   // is the highest generation.
 115   Generation* next_gen() const;
 116 
 117   // Initialize the generation.
 118   Generation(ReservedSpace rs, size_t initial_byte_size, int level);
 119 
 120   // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
 121   // "sp" that point into younger generations.
 122   // The iteration is only over objects allocated at the start of the
 123   // iteration; objects allocated as a result of applying the closure are
 124   // not included.
 125   void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
 126 
 127  public:
 128   // The set of possible generation kinds (Other covers kinds not listed here).
 129   enum Name {
 130     DefNew,
 131     ParNew,
 132     MarkSweepCompact,
 133     ConcurrentMarkSweep,
 134     Other
 135   };
 136 
 137   enum SomePublicConstants {
 138     // Generations are GenGrain-aligned and have sizes that are multiples of
 139     // GenGrain.
 140     // Note: on ARM we add 1 bit for card_table_base to be properly aligned
 141     // (we expect its low byte to be zero - see implementation of post_barrier)
 142     LogOfGenGrain = 16 ARM32_ONLY(+1),
 143     GenGrain = 1 << LogOfGenGrain
 144   };
 145 


 509 
 510   // Iteration.
 511 
 512   // Iterate over all the ref-containing fields of all objects in the
 513   // generation, calling "cl.do_oop" on each.
 514   virtual void oop_iterate(ExtendedOopClosure* cl);
 515 
 516   // Iterate over all objects in the generation, calling "cl.do_object" on
 517   // each.
 518   virtual void object_iterate(ObjectClosure* cl);
 519 
 520   // Iterate over all safe objects in the generation, calling "cl.do_object" on
 521   // each.  An object is safe if its references point to other objects in
 522   // the heap.  This defaults to object_iterate() unless overridden.
 523   virtual void safe_object_iterate(ObjectClosure* cl);
 524 
 525   // Apply "cl->do_oop" to (the address of) all and only all the ref fields
 526   // in the current generation that contain pointers to objects in younger
 527   // generations. Objects allocated since the last "save_marks" call are
 528   // excluded.
 529   virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;
 530 
 531   // Inform a generation that it no longer contains references to objects
 532   // in any younger generation.    [e.g. Because younger gens are empty,
 533   // clear the card table.]  The default implementation is a no-op.
 534   virtual void clear_remembered_set() { }
 535 
 536   // Inform a generation that some of its objects have moved.  [e.g. The
 537   // generation's spaces were compacted, invalidating the card table.]
 538   // The default implementation is a no-op.
 539   virtual void invalidate_remembered_set() { }
 539 
 540   // Block abstraction.
 541 
 542   // Returns the address of the start of the "block" that contains the
 543   // address "addr".  We say "blocks" instead of "object" since some heaps
 544   // may not pack objects densely; a chunk may either be an object or a
 545   // non-object.
 546   virtual HeapWord* block_start(const void* addr) const;
 547 
 548   // Requires "addr" to be the start of a chunk, and returns its size.
 549   // "addr + size" is required to be the start of a new chunk, or the end


< prev index next >