203
204 // Requires "addr" to be the start of a block, and returns "TRUE" iff
205 // the block is an object.
206 virtual bool block_is_obj(const HeapWord* addr) const = 0;
207
208 // Requires "addr" to be the start of a block, and returns "TRUE" iff
209 // the block is an object and the object is alive.
210 virtual bool obj_is_alive(const HeapWord* addr) const;
211
212 // Allocation (return NULL if full). Assumes the caller has established
213 // mutually exclusive access to the space.
214 virtual HeapWord* allocate(size_t word_size) = 0;
215
216 // Allocation (return NULL if full). Enforces mutual exclusion internally.
217 virtual HeapWord* par_allocate(size_t word_size) = 0;
218
219 // Mark-sweep-compact support: all spaces can update pointers to objects
220 // moving as a part of compaction.
221 virtual void adjust_pointers() = 0;
222
223 // PrintHeapAtGC support
224 virtual void print() const;
225 virtual void print_on(outputStream* st) const;
226 virtual void print_short() const;
227 virtual void print_short_on(outputStream* st) const;
228
229
230 // Accessor for parallel sequential tasks.
// Returns the address of the _par_seq_tasks member (declared outside this
// view); callers share this one task-tracking object across GC worker threads.
231 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
232
233 // IF "this" is a ContiguousSpace, return it, else return NULL.
// Default for the abstract base: not contiguous. ContiguousSpace overrides
// this to return itself (see the ContiguousSpace declaration below).
234 virtual ContiguousSpace* toContiguousSpace() {
235 return NULL;
236 }
237
238 // Debugging
239 virtual void verify() const = 0;
240 };
241
242 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
243 // OopClosure to (the addresses of) all the ref-containing fields that could
642 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
643
644 // Same as object_iterate, but starting from "mark", which is required
645 // to denote the start of an object. Objects allocated by
646 // applications of the closure *are* included in the iteration.
647 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
648
649 // Very inefficient implementation.
650 virtual HeapWord* block_start_const(const void* p) const;
651 size_t block_size(const HeapWord* p) const;
652 // If a block is in the allocated area, it is an object.
// In a contiguous space every address below top() is inside an allocated object.
653 bool block_is_obj(const HeapWord* p) const { return p < top(); }
654
655 // Addresses for inlined allocation
// NOTE(review): these expose the raw addresses of the _top/_end fields,
// presumably so compiler-generated fast-path allocation can bump-allocate
// directly — confirm against the callers of top_addr()/end_addr().
656 HeapWord** top_addr() { return &_top; }
657 HeapWord** end_addr() { return &_end; }
658
659 // Overrides for more efficient compaction support.
660 void prepare_for_compaction(CompactPoint* cp);
661
662 // PrintHeapAtGC support.
663 virtual void print_on(outputStream* st) const;
664
665 // Checked dynamic downcasts.
// Overrides the base-class default (which returns NULL): this space IS contiguous.
666 virtual ContiguousSpace* toContiguousSpace() {
667 return this;
668 }
669
670 // Debugging
671 virtual void verify() const;
672
673 // Used to increase collection frequency. "factor" of 0 means entire
674 // space.
675 void allocate_temporary_filler(int factor);
676 };
677
678
679 // A dirty card to oop closure that does filtering.
680 // It knows how to filter out objects that are outside of the _boundary.
681 class Filtering_DCTOC : public DirtyCardToOopClosure {
682 protected:
|
203
204 // Requires "addr" to be the start of a block, and returns "TRUE" iff
205 // the block is an object.
206 virtual bool block_is_obj(const HeapWord* addr) const = 0;
207
208 // Requires "addr" to be the start of a block, and returns "TRUE" iff
209 // the block is an object and the object is alive.
210 virtual bool obj_is_alive(const HeapWord* addr) const;
211
212 // Allocation (return NULL if full). Assumes the caller has established
213 // mutually exclusive access to the space.
214 virtual HeapWord* allocate(size_t word_size) = 0;
215
216 // Allocation (return NULL if full). Enforces mutual exclusion internally.
217 virtual HeapWord* par_allocate(size_t word_size) = 0;
218
219 // Mark-sweep-compact support: all spaces can update pointers to objects
220 // moving as a part of compaction.
221 virtual void adjust_pointers() = 0;
222
223 virtual void print() const;
224 virtual void print_on(outputStream* st) const;
225 virtual void print_short() const;
226 virtual void print_short_on(outputStream* st) const;
227
228
229 // Accessor for parallel sequential tasks.
230 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
231
232 // IF "this" is a ContiguousSpace, return it, else return NULL.
233 virtual ContiguousSpace* toContiguousSpace() {
234 return NULL;
235 }
236
237 // Debugging
238 virtual void verify() const = 0;
239 };
240
241 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
242 // OopClosure to (the addresses of) all the ref-containing fields that could
641 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
642
643 // Same as object_iterate, but starting from "mark", which is required
644 // to denote the start of an object. Objects allocated by
645 // applications of the closure *are* included in the iteration.
646 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
647
648 // Very inefficient implementation.
649 virtual HeapWord* block_start_const(const void* p) const;
650 size_t block_size(const HeapWord* p) const;
651 // If a block is in the allocated area, it is an object.
// In a contiguous space every address below top() is inside an allocated object.
652 bool block_is_obj(const HeapWord* p) const { return p < top(); }
653
654 // Addresses for inlined allocation
// NOTE(review): these expose the raw addresses of the _top/_end fields,
// presumably so compiler-generated fast-path allocation can bump-allocate
// directly — confirm against the callers of top_addr()/end_addr().
655 HeapWord** top_addr() { return &_top; }
656 HeapWord** end_addr() { return &_end; }
657
658 // Overrides for more efficient compaction support.
659 void prepare_for_compaction(CompactPoint* cp);
660
// PrintHeapAtGC support.
661 virtual void print_on(outputStream* st) const;
662
663 // Checked dynamic downcasts.
// Overrides the base-class default (which returns NULL): this space IS contiguous.
664 virtual ContiguousSpace* toContiguousSpace() {
665 return this;
666 }
667
668 // Debugging
669 virtual void verify() const;
670
671 // Used to increase collection frequency. "factor" of 0 means entire
672 // space.
673 void allocate_temporary_filler(int factor);
674 };
675
676
677 // A dirty card to oop closure that does filtering.
678 // It knows how to filter out objects that are outside of the _boundary.
679 class Filtering_DCTOC : public DirtyCardToOopClosure {
680 protected:
|