166
167 // Iterate over all the ref-containing fields of all objects in the
168 // space, calling "cl.do_oop" on each. Fields in objects allocated by
169 // applications of the closure are not included in the iteration.
170 virtual void oop_iterate(ExtendedOopClosure* cl);
171
172 // Iterate over all objects in the space, calling "cl.do_object" on
173 // each. Objects allocated by applications of the closure are not
174 // included in the iteration.
175 virtual void object_iterate(ObjectClosure* blk) = 0;
176 // Similar to object_iterate() except only iterates over
177 // objects whose internal references point to objects in the space.
178 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
179
180 // Create and return a new dirty card to oop closure. Can be
181 // overridden to return the appropriate type of closure
182 // depending on the type of space in which the closure will
183 // operate. ResourceArea allocated.
// NOTE(review): "ResourceArea allocated" means the returned closure is
// allocated in the current thread's resource area, so callers must not
// delete it explicitly -- confirm against the implementations.
184 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
185 CardTableModRefBS::PrecisionStyle precision,
186 HeapWord* boundary = NULL);
187
188 // If "p" is in the space, returns the address of the start of the
189 // "block" that contains "p". We say "block" instead of "object" since
190 // some heaps may not pack objects densely; a chunk may either be an
191 // object or a non-object. If "p" is not in the space, return NULL.
192 virtual HeapWord* block_start_const(const void* p) const = 0;
193
194 // The non-const version may have benevolent side effects on the data
195 // structure supporting these calls, possibly speeding up future calls.
196 // The default implementation, however, is simply to call the const
197 // version.
198 virtual HeapWord* block_start(const void* p);
199
200 // Requires "addr" to be the start of a chunk, and returns its size.
201 // "addr + size" is required to be the start of a new chunk, or the end
202 // of the active area of the heap.
203 virtual size_t block_size(const HeapWord* addr) const = 0;
204
205 // Requires "addr" to be the start of a block, and returns "TRUE" iff
206 // the block is an object.
// NOTE(review): the declaration this last comment documents (presumably
// a block_is_obj-style query) is not visible here -- the excerpt jumps
// from line 206 to line 612. Confirm against the full header.
612 #if INCLUDE_ALL_GCS
613 // In support of parallel oop_iterate.
// The macro below declares one statically-typed par_oop_iterate overload
// per closure type listed in ALL_PAR_OOP_ITERATE_CLOSURES, then the
// macro name is undefined again so it cannot leak out of this section.
614 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
615 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
616
617 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
618 #undef ContigSpace_PAR_OOP_ITERATE_DECL
619 #endif // INCLUDE_ALL_GCS
620
621 // Compaction support
622 virtual void reset_after_compaction() {
// compaction_top() was advanced by the compaction pass; it must still
// denote an address inside this space before we adopt it as the new top.
623 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
624 set_top(compaction_top());
625 // set new iteration safe limit
// NOTE(review): presumably this keeps concurrent iterators from walking
// past the freshly-reset top -- confirm with users of the safe limit.
626 set_concurrent_iteration_safe_limit(compaction_top());
627 }
628
629 // Override.
// Overrides the virtual new_dcto_cl declared earlier in this class;
// same contract (ResourceArea-allocated dirty-card-to-oop closure).
630 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
631 CardTableModRefBS::PrecisionStyle precision,
632 HeapWord* boundary = NULL);
633
634 // Apply "blk->do_oop" to the addresses of all reference fields in objects
635 // starting with the _saved_mark_word, which was noted during a generation's
636 // save_marks and is required to denote the head of an object.
637 // Fields in objects allocated by applications of the closure
638 // *are* included in the iteration.
639 // Updates _saved_mark_word to point to just after the last object
640 // iterated over.
// One overload (suffixed by nv_suffix) is declared per closure type in
// ALL_SINCE_SAVE_MARKS_CLOSURES; the helper macro is undefined afterwards.
641 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
642 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
643
644 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
645 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
646
647 // Same as object_iterate, but starting from "mark", which is required
648 // to denote the start of an object. Objects allocated by
649 // applications of the closure *are* included in the iteration.
650 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
651
652 // Very inefficient implementation.
// NOTE(review): the declaration this comment belongs to is cut off at
// the end of this excerpt -- see the full header for what it documents.
|
166
167 // Iterate over all the ref-containing fields of all objects in the
168 // space, calling "cl.do_oop" on each. Fields in objects allocated by
169 // applications of the closure are not included in the iteration.
170 virtual void oop_iterate(ExtendedOopClosure* cl);
171
172 // Iterate over all objects in the space, calling "cl.do_object" on
173 // each. Objects allocated by applications of the closure are not
174 // included in the iteration.
175 virtual void object_iterate(ObjectClosure* blk) = 0;
176 // Similar to object_iterate() except only iterates over
177 // objects whose internal references point to objects in the space.
178 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
179
180 // Create and return a new dirty card to oop closure. Can be
181 // overridden to return the appropriate type of closure
182 // depending on the type of space in which the closure will
183 // operate. ResourceArea allocated.
// NOTE(review): unlike the earlier variant of this header, "boundary"
// has no default and a "parallel" flag was added; presumably "parallel"
// selects a closure safe for multi-threaded card scanning -- confirm
// against the implementations.
184 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
185 CardTableModRefBS::PrecisionStyle precision,
186 HeapWord* boundary,
187 bool parallel);
188
189 // If "p" is in the space, returns the address of the start of the
190 // "block" that contains "p". We say "block" instead of "object" since
191 // some heaps may not pack objects densely; a chunk may either be an
192 // object or a non-object. If "p" is not in the space, return NULL.
193 virtual HeapWord* block_start_const(const void* p) const = 0;
194
195 // The non-const version may have benevolent side effects on the data
196 // structure supporting these calls, possibly speeding up future calls.
197 // The default implementation, however, is simply to call the const
198 // version.
199 virtual HeapWord* block_start(const void* p);
200
201 // Requires "addr" to be the start of a chunk, and returns its size.
202 // "addr + size" is required to be the start of a new chunk, or the end
203 // of the active area of the heap.
204 virtual size_t block_size(const HeapWord* addr) const = 0;
205
206 // Requires "addr" to be the start of a block, and returns "TRUE" iff
207 // the block is an object.
// NOTE(review): the declaration this last comment documents (presumably
// a block_is_obj-style query) is not visible here -- the excerpt jumps
// from line 207 to line 613. Confirm against the full header.
613 #if INCLUDE_ALL_GCS
614 // In support of parallel oop_iterate.
// The macro below declares one statically-typed par_oop_iterate overload
// per closure type listed in ALL_PAR_OOP_ITERATE_CLOSURES, then the
// macro name is undefined again so it cannot leak out of this section.
615 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
616 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
617
618 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
619 #undef ContigSpace_PAR_OOP_ITERATE_DECL
620 #endif // INCLUDE_ALL_GCS
621
622 // Compaction support
623 virtual void reset_after_compaction() {
// compaction_top() was advanced by the compaction pass; it must still
// denote an address inside this space before we adopt it as the new top.
624 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
625 set_top(compaction_top());
626 // set new iteration safe limit
// NOTE(review): presumably this keeps concurrent iterators from walking
// past the freshly-reset top -- confirm with users of the safe limit.
627 set_concurrent_iteration_safe_limit(compaction_top());
628 }
629
630 // Override.
// Overrides the virtual new_dcto_cl declared earlier in this class;
// same contract (ResourceArea-allocated dirty-card-to-oop closure).
631 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
632 CardTableModRefBS::PrecisionStyle precision,
633 HeapWord* boundary,
634 bool parallel);
635
636 // Apply "blk->do_oop" to the addresses of all reference fields in objects
637 // starting with the _saved_mark_word, which was noted during a generation's
638 // save_marks and is required to denote the head of an object.
639 // Fields in objects allocated by applications of the closure
640 // *are* included in the iteration.
641 // Updates _saved_mark_word to point to just after the last object
642 // iterated over.
// One overload (suffixed by nv_suffix) is declared per closure type in
// ALL_SINCE_SAVE_MARKS_CLOSURES; the helper macro is undefined afterwards.
643 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
644 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
645
646 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
647 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
648
649 // Same as object_iterate, but starting from "mark", which is required
650 // to denote the start of an object. Objects allocated by
651 // applications of the closure *are* included in the iteration.
652 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
653
654 // Very inefficient implementation.
// NOTE(review): the declaration this comment belongs to is cut off at
// the end of this excerpt -- see the full header for what it documents.
|