9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_SPACE_HPP
26 #define SHARE_VM_GC_SHARED_SPACE_HPP
27
28 #include "gc/shared/blockOffsetTable.hpp"
29 #include "gc/shared/cardTableModRefBS.hpp"
30 #include "gc/shared/workgroup.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/iterator.hpp"
33 #include "memory/memRegion.hpp"
34 #include "oops/markOop.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "utilities/macros.hpp"
37
38 // A space is an abstraction for the "storage units" backing
39 // up the generation abstraction. It includes specific
40 // implementations for keeping track of free and used space,
41 // for iterating over objects and free blocks, etc.
42
43 // Forward decls.
44 class Space;
45 class BlockOffsetArray;
46 class BlockOffsetArrayContigSpace;
47 class Generation;
48 class CompactibleSpace;
49 class BlockOffsetTable;
163 virtual size_t free() const = 0;
164
165 // Iterate over all the ref-containing fields of all objects in the
166 // space, calling "cl.do_oop" on each. Fields in objects allocated by
167 // applications of the closure are not included in the iteration.
168 virtual void oop_iterate(ExtendedOopClosure* cl);
169
170 // Iterate over all objects in the space, calling "cl.do_object" on
171 // each. Objects allocated by applications of the closure are not
172 // included in the iteration.
173 virtual void object_iterate(ObjectClosure* blk) = 0;
174 // Similar to object_iterate() except only iterates over
175 // objects whose internal references point to objects in the space.
176 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
177
178 // Create and return a new dirty card to oop closure. Can be
179 // overridden to return the appropriate type of closure
180 // depending on the type of space in which the closure will
181 // operate. ResourceArea allocated.
182 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
183 CardTableModRefBS::PrecisionStyle precision,
184 HeapWord* boundary,
185 bool parallel);
186
187 // If "p" is in the space, returns the address of the start of the
188 // "block" that contains "p". We say "block" instead of "object" since
189 // some heaps may not pack objects densely; a chunk may either be an
190 // object or a non-object. If "p" is not in the space, return NULL.
191 virtual HeapWord* block_start_const(const void* p) const = 0;
192
193 // The non-const version may have benevolent side effects on the data
194 // structure supporting these calls, possibly speeding up future calls.
195 // The default implementation, however, is simply to call the const
196 // version.
197 virtual HeapWord* block_start(const void* p);
198
199 // Requires "addr" to be the start of a chunk, and returns its size.
200 // "addr + size" is required to be the start of a new chunk, or the end
201 // of the active area of the heap.
202 virtual size_t block_size(const HeapWord* addr) const = 0;
203
235 }
236
237 // Debugging
238 virtual void verify() const = 0;
239 };
240
241 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
242 // OopClosure to (the addresses of) all the ref-containing fields that could
243 // be modified by virtue of the given MemRegion being dirty. (Note that
244 // because of the imprecise nature of the write barrier, this may iterate
245 // over oops beyond the region.)
246 // This base type for dirty card to oop closures handles memory regions
247 // in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
249 // that works with ContiguousSpaces.
250
251 class DirtyCardToOopClosure: public MemRegionClosureRO {
252 protected:
253 ExtendedOopClosure* _cl;
254 Space* _sp;
255 CardTableModRefBS::PrecisionStyle _precision;
256 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
257 // pointing below boundary.
258 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
259 // a downwards traversal; this is the
260 // lowest location already done (or,
261 // alternatively, the lowest address that
262 // shouldn't be done again. NULL means infinity.)
263 NOT_PRODUCT(HeapWord* _last_bottom;)
264 NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
265
266 // Get the actual top of the area on which the closure will
267 // operate, given where the top is assumed to be (the end of the
268 // memory region passed to do_MemRegion) and where the object
269 // at the top is assumed to start. For example, an object may
270 // start at the top but actually extend past the assumed top,
271 // in which case the top becomes the end of the object.
272 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
273
274 // Walk the given memory region from bottom to (actual) top
275 // looking for objects and applying the oop closure (_cl) to
276 // them. The base implementation of this treats the area as
277 // blocks, where a block may or may not be an object. Sub-
278 // classes should override this to provide more accurate
279 // or possibly more efficient walking.
280 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
281
282 public:
283 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
284 CardTableModRefBS::PrecisionStyle precision,
285 HeapWord* boundary) :
286 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
287 _min_done(NULL) {
288 NOT_PRODUCT(_last_bottom = NULL);
289 NOT_PRODUCT(_last_explicit_min_done = NULL);
290 }
291
292 void do_MemRegion(MemRegion mr);
293
294 void set_min_done(HeapWord* min_done) {
295 _min_done = min_done;
296 NOT_PRODUCT(_last_explicit_min_done = _min_done);
297 }
298 #ifndef PRODUCT
299 void set_last_bottom(HeapWord* last_bottom) {
300 _last_bottom = last_bottom;
301 }
302 #endif
303 };
304
598
599 #if INCLUDE_ALL_GCS
600 // In support of parallel oop_iterate.
601 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
602 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
603
604 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
605 #undef ContigSpace_PAR_OOP_ITERATE_DECL
606 #endif // INCLUDE_ALL_GCS
607
608 // Compaction support
  // Reset this space's state after a compaction pass: the new top is
  // wherever compaction left its destination pointer (compaction_top()).
  virtual void reset_after_compaction() {
    // compaction_top() must lie within [bottom(), end()] to be a valid top.
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
615
616 // Override.
617 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
618 CardTableModRefBS::PrecisionStyle precision,
619 HeapWord* boundary,
620 bool parallel);
621
622 // Apply "blk->do_oop" to the addresses of all reference fields in objects
623 // starting with the _saved_mark_word, which was noted during a generation's
624 // save_marks and is required to denote the head of an object.
625 // Fields in objects allocated by applications of the closure
626 // *are* included in the iteration.
627 // Updates _saved_mark_word to point to just after the last object
628 // iterated over.
629 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
630 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
631
632 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
633 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
634
635 // Same as object_iterate, but starting from "mark", which is required
636 // to denote the start of an object. Objects allocated by
637 // applications of the closure *are* included in the iteration.
638 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
673 // Override.
674 void walk_mem_region(MemRegion mr,
675 HeapWord* bottom, HeapWord* top);
676
677 // Walk the given memory region, from bottom to top, applying
678 // the given oop closure to (possibly) all objects found. The
679 // given oop closure may or may not be the same as the oop
680 // closure with which this closure was created, as it may
681 // be a filtering closure which makes use of the _boundary.
682 // We offer two signatures, so the FilteringClosure static type is
683 // apparent.
684 virtual void walk_mem_region_with_cl(MemRegion mr,
685 HeapWord* bottom, HeapWord* top,
686 ExtendedOopClosure* cl) = 0;
687 virtual void walk_mem_region_with_cl(MemRegion mr,
688 HeapWord* bottom, HeapWord* top,
689 FilteringClosure* cl) = 0;
690
691 public:
  // Constructed exactly like the base DirtyCardToOopClosure; the filtering
  // behavior is supplied by the walk_mem_region_with_cl overrides declared
  // above, which sub-classes must implement.
  FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
                 CardTableModRefBS::PrecisionStyle precision,
                 HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
696 };
697
698 // A dirty card to oop closure for contiguous spaces
699 // (ContiguousSpace and sub-classes).
700 // It is a FilteringClosure, as defined above, and it knows:
701 //
702 // 1. That the actual top of any area in a memory region
703 // contained by the space is bounded by the end of the contiguous
704 // region of the space.
705 // 2. That the space is really made up of objects and not just
706 // blocks.
707
class ContiguousSpaceDCTOC : public FilteringDCTOC {
protected:
  // Overrides.
  // In a contiguous space the walk is bounded by the end of the contiguous
  // region; if the object starting at top_obj extends past the assumed top,
  // the actual top becomes the end of that object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk [bottom, top): a contiguous space is made up of objects (not
  // arbitrary blocks), so each step advances by a whole object.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  // Overload taking the concrete FilteringClosure type so its static
  // type is apparent (see the two-signature note on FilteringDCTOC).
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  // All arguments are forwarded to FilteringDCTOC; "sp" must be a
  // ContiguousSpace (or sub-class), per the class comment above.
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};
727
// A ContiguousSpace that supports an efficient "block_start" operation via
729 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
730 // other spaces.) This is the abstract base class for old generation
731 // (tenured) spaces.
732
733 class OffsetTableContigSpace: public ContiguousSpace {
734 friend class VMStructs;
735 protected:
736 BlockOffsetArrayContigSpace _offsets;
737 Mutex _par_alloc_lock;
738
739 public:
740 // Constructor
741 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
742 MemRegion mr);
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_SPACE_HPP
26 #define SHARE_VM_GC_SHARED_SPACE_HPP
27
28 #include "gc/shared/blockOffsetTable.hpp"
29 #include "gc/shared/cardTable.hpp"
30 #include "gc/shared/workgroup.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/iterator.hpp"
33 #include "memory/memRegion.hpp"
34 #include "oops/markOop.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "utilities/macros.hpp"
37
38 // A space is an abstraction for the "storage units" backing
39 // up the generation abstraction. It includes specific
40 // implementations for keeping track of free and used space,
41 // for iterating over objects and free blocks, etc.
42
43 // Forward decls.
44 class Space;
45 class BlockOffsetArray;
46 class BlockOffsetArrayContigSpace;
47 class Generation;
48 class CompactibleSpace;
49 class BlockOffsetTable;
163 virtual size_t free() const = 0;
164
165 // Iterate over all the ref-containing fields of all objects in the
166 // space, calling "cl.do_oop" on each. Fields in objects allocated by
167 // applications of the closure are not included in the iteration.
168 virtual void oop_iterate(ExtendedOopClosure* cl);
169
170 // Iterate over all objects in the space, calling "cl.do_object" on
171 // each. Objects allocated by applications of the closure are not
172 // included in the iteration.
173 virtual void object_iterate(ObjectClosure* blk) = 0;
174 // Similar to object_iterate() except only iterates over
175 // objects whose internal references point to objects in the space.
176 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
177
178 // Create and return a new dirty card to oop closure. Can be
179 // overridden to return the appropriate type of closure
180 // depending on the type of space in which the closure will
181 // operate. ResourceArea allocated.
182 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
183 CardTable::PrecisionStyle precision,
184 HeapWord* boundary,
185 bool parallel);
186
187 // If "p" is in the space, returns the address of the start of the
188 // "block" that contains "p". We say "block" instead of "object" since
189 // some heaps may not pack objects densely; a chunk may either be an
190 // object or a non-object. If "p" is not in the space, return NULL.
191 virtual HeapWord* block_start_const(const void* p) const = 0;
192
193 // The non-const version may have benevolent side effects on the data
194 // structure supporting these calls, possibly speeding up future calls.
195 // The default implementation, however, is simply to call the const
196 // version.
197 virtual HeapWord* block_start(const void* p);
198
199 // Requires "addr" to be the start of a chunk, and returns its size.
200 // "addr + size" is required to be the start of a new chunk, or the end
201 // of the active area of the heap.
202 virtual size_t block_size(const HeapWord* addr) const = 0;
203
235 }
236
237 // Debugging
238 virtual void verify() const = 0;
239 };
240
241 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
242 // OopClosure to (the addresses of) all the ref-containing fields that could
243 // be modified by virtue of the given MemRegion being dirty. (Note that
244 // because of the imprecise nature of the write barrier, this may iterate
245 // over oops beyond the region.)
246 // This base type for dirty card to oop closures handles memory regions
247 // in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
249 // that works with ContiguousSpaces.
250
251 class DirtyCardToOopClosure: public MemRegionClosureRO {
252 protected:
253 ExtendedOopClosure* _cl;
254 Space* _sp;
255 CardTable::PrecisionStyle _precision;
256 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
257 // pointing below boundary.
258 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
259 // a downwards traversal; this is the
260 // lowest location already done (or,
261 // alternatively, the lowest address that
262 // shouldn't be done again. NULL means infinity.)
263 NOT_PRODUCT(HeapWord* _last_bottom;)
264 NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
265
266 // Get the actual top of the area on which the closure will
267 // operate, given where the top is assumed to be (the end of the
268 // memory region passed to do_MemRegion) and where the object
269 // at the top is assumed to start. For example, an object may
270 // start at the top but actually extend past the assumed top,
271 // in which case the top becomes the end of the object.
272 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
273
274 // Walk the given memory region from bottom to (actual) top
275 // looking for objects and applying the oop closure (_cl) to
276 // them. The base implementation of this treats the area as
277 // blocks, where a block may or may not be an object. Sub-
278 // classes should override this to provide more accurate
279 // or possibly more efficient walking.
280 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
281
282 public:
283 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
284 CardTable::PrecisionStyle precision,
285 HeapWord* boundary) :
286 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
287 _min_done(NULL) {
288 NOT_PRODUCT(_last_bottom = NULL);
289 NOT_PRODUCT(_last_explicit_min_done = NULL);
290 }
291
292 void do_MemRegion(MemRegion mr);
293
294 void set_min_done(HeapWord* min_done) {
295 _min_done = min_done;
296 NOT_PRODUCT(_last_explicit_min_done = _min_done);
297 }
298 #ifndef PRODUCT
299 void set_last_bottom(HeapWord* last_bottom) {
300 _last_bottom = last_bottom;
301 }
302 #endif
303 };
304
598
599 #if INCLUDE_ALL_GCS
600 // In support of parallel oop_iterate.
601 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
602 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
603
604 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
605 #undef ContigSpace_PAR_OOP_ITERATE_DECL
606 #endif // INCLUDE_ALL_GCS
607
608 // Compaction support
  // Reset this space's state after a compaction pass: the new top is
  // wherever compaction left its destination pointer (compaction_top()).
  virtual void reset_after_compaction() {
    // compaction_top() must lie within [bottom(), end()] to be a valid top.
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
615
616 // Override.
617 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
618 CardTable::PrecisionStyle precision,
619 HeapWord* boundary,
620 bool parallel);
621
622 // Apply "blk->do_oop" to the addresses of all reference fields in objects
623 // starting with the _saved_mark_word, which was noted during a generation's
624 // save_marks and is required to denote the head of an object.
625 // Fields in objects allocated by applications of the closure
626 // *are* included in the iteration.
627 // Updates _saved_mark_word to point to just after the last object
628 // iterated over.
629 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
630 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
631
632 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
633 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
634
635 // Same as object_iterate, but starting from "mark", which is required
636 // to denote the start of an object. Objects allocated by
637 // applications of the closure *are* included in the iteration.
638 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
673 // Override.
674 void walk_mem_region(MemRegion mr,
675 HeapWord* bottom, HeapWord* top);
676
677 // Walk the given memory region, from bottom to top, applying
678 // the given oop closure to (possibly) all objects found. The
679 // given oop closure may or may not be the same as the oop
680 // closure with which this closure was created, as it may
681 // be a filtering closure which makes use of the _boundary.
682 // We offer two signatures, so the FilteringClosure static type is
683 // apparent.
684 virtual void walk_mem_region_with_cl(MemRegion mr,
685 HeapWord* bottom, HeapWord* top,
686 ExtendedOopClosure* cl) = 0;
687 virtual void walk_mem_region_with_cl(MemRegion mr,
688 HeapWord* bottom, HeapWord* top,
689 FilteringClosure* cl) = 0;
690
691 public:
  // Constructed exactly like the base DirtyCardToOopClosure; the filtering
  // behavior is supplied by the walk_mem_region_with_cl overrides declared
  // above, which sub-classes must implement.
  FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
                 CardTable::PrecisionStyle precision,
                 HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
696 };
697
698 // A dirty card to oop closure for contiguous spaces
699 // (ContiguousSpace and sub-classes).
700 // It is a FilteringClosure, as defined above, and it knows:
701 //
702 // 1. That the actual top of any area in a memory region
703 // contained by the space is bounded by the end of the contiguous
704 // region of the space.
705 // 2. That the space is really made up of objects and not just
706 // blocks.
707
class ContiguousSpaceDCTOC : public FilteringDCTOC {
protected:
  // Overrides.
  // In a contiguous space the walk is bounded by the end of the contiguous
  // region; if the object starting at top_obj extends past the assumed top,
  // the actual top becomes the end of that object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk [bottom, top): a contiguous space is made up of objects (not
  // arbitrary blocks), so each step advances by a whole object.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  // Overload taking the concrete FilteringClosure type so its static
  // type is apparent (see the two-signature note on FilteringDCTOC).
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  // All arguments are forwarded to FilteringDCTOC; "sp" must be a
  // ContiguousSpace (or sub-class), per the class comment above.
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTable::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};
727
// A ContiguousSpace that supports an efficient "block_start" operation via
729 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
730 // other spaces.) This is the abstract base class for old generation
731 // (tenured) spaces.
732
733 class OffsetTableContigSpace: public ContiguousSpace {
734 friend class VMStructs;
735 protected:
736 BlockOffsetArrayContigSpace _offsets;
737 Mutex _par_alloc_lock;
738
739 public:
740 // Constructor
741 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
742 MemRegion mr);
|