
src/hotspot/share/gc/shared/space.hpp


*** 218,230 ****
--- 218,232 ----
    virtual HeapWord* allocate(size_t word_size) = 0;
  
    // Allocation (return NULL if full). Enforces mutual exclusion internally.
    virtual HeapWord* par_allocate(size_t word_size) = 0;
  
+ #if INCLUDE_SERIALGC
    // Mark-sweep-compact support: all spaces can update pointers to objects
    // moving as a part of compaction.
    virtual void adjust_pointers() = 0;
+ #endif
  
    virtual void print() const;
    virtual void print_on(outputStream* st) const;
    virtual void print_short() const;
    virtual void print_short_on(outputStream* st) const;
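The INCLUDE_SERIALGC guard added here is one of the per-GC feature macros defined in src/hotspot/share/utilities/macros.hpp. A minimal sketch of that guard pattern, with the macro bodies paraphrased rather than taken from this changeset:

    // Sketch of the per-GC include-guard pattern (assumed to mirror
    // utilities/macros.hpp; not copied from this webrev).
    #ifndef INCLUDE_SERIALGC
    #define INCLUDE_SERIALGC 1          // enabled unless the build defines it to 0
    #endif

    #if INCLUDE_SERIALGC
    #define SERIALGC_ONLY(code) code
    #define NOT_SERIALGC(code)
    #else
    #define SERIALGC_ONLY(code)
    #define NOT_SERIALGC(code) code
    #endif

With the serialgc feature disabled, the guarded adjust_pointers() declaration above simply drops out of the Space interface.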
*** 403,412 ****
--- 405,415 ----
    void set_next_compaction_space(CompactibleSpace* csp) {
      _next_compaction_space = csp;
    }
  
+ #if INCLUDE_SERIALGC
    // MarkSweep support phase2
  
    // Start the process of compaction of the current space: compute
    // post-compaction addresses, and insert forwarding pointers. The fields
    // "cp->gen" and "cp->compaction_space" are the generation and space into
*** 418,427 ****
--- 421,431 ----
    virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  
    // MarkSweep support phase3
    virtual void adjust_pointers();
  
    // MarkSweep support phase4
    virtual void compact();
+ #endif // INCLUDE_SERIALGC
  
    // The maximum percentage of objects that can be dead in the compacted
    // live part of a compacted space ("deadwood" support.)
    virtual size_t allowed_dead_ratio() const { return 0; };
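The three virtuals placed under INCLUDE_SERIALGC in this and the preceding hunk correspond to phases 2-4 of serial mark-sweep-compact. A rough, self-contained model of how a full collection drives them, with hypothetical names (not the actual GenMarkSweep code):

    #include <vector>

    struct CompactPoint;                       // opaque here, as in the real header

    // Hypothetical stand-in for CompactibleSpace, reduced to the phase hooks
    // that this change places under INCLUDE_SERIALGC.
    struct ToyCompactibleSpace {
      virtual ~ToyCompactibleSpace() = default;
      virtual void prepare_for_compaction(CompactPoint* cp) = 0;  // phase 2: compute forwarding
      virtual void adjust_pointers() = 0;                         // phase 3: fix references
      virtual void compact() = 0;                                 // phase 4: slide objects
    };

    // Hypothetical driver: after marking (phase 1), each phase runs to
    // completion over every compactible space before the next phase starts.
    void toy_full_collection(std::vector<ToyCompactibleSpace*>& spaces, CompactPoint* cp) {
      for (ToyCompactibleSpace* s : spaces) s->prepare_for_compaction(cp);
      for (ToyCompactibleSpace* s : spaces) s->adjust_pointers();
      for (ToyCompactibleSpace* s : spaces) s->compact();
    }

The ordering matters: the forwarding addresses computed in phase 2 are what phase 3 uses to rewrite references, and only then can phase 4 overwrite the old copies.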
*** 472,484 ****
--- 476,490 ----
    // The space argument should be a subclass of CompactibleSpace, implementing
    // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
    // and possibly also overriding obj_size(), and adjust_obj_size().
    // These functions should avoid virtual calls whenever possible.
  
+ #if INCLUDE_SERIALGC
    // Frequently calls adjust_obj_size().
    template <class SpaceType>
    static inline void scan_and_adjust_pointers(SpaceType* space);
+ #endif
  
    // Frequently calls obj_size().
    template <class SpaceType>
    static inline void scan_and_compact(SpaceType* space);
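The scan_and_* helpers guarded here are templates precisely so the accessors named in the comment bind statically to the concrete space type. A toy, self-contained illustration of that contract (ToySpace and the fixed block size are invented for the example; the real implementations live in space.inline.hpp):

    #include <cstddef>

    typedef char HeapWord;                 // toy stand-in for HotSpot's HeapWord

    struct ToySpace {                      // hypothetical space type, not HotSpot code
      HeapWord* _bottom;
      HeapWord* _top;

      // The hooks the comment asks a scan_and_* space argument to provide:
      HeapWord* scan_limit() const                          { return _top; }
      bool      scanned_block_is_obj(const HeapWord*) const { return true; }
      size_t    scanned_block_size(const HeapWord*) const   { return 8; } // toy: fixed-size blocks
    };

    // Non-virtual template: every call below resolves at compile time against
    // SpaceType, which is the "avoid virtual calls" property the comment describes.
    template <class SpaceType>
    inline void toy_scan_and_compact(SpaceType* space) {
      for (HeapWord* cur = space->_bottom; cur < space->scan_limit(); ) {
        size_t size = space->scanned_block_size(cur);
        if (space->scanned_block_is_obj(cur)) {
          // the real code would copy the object to its forwarding address here
        }
        cur += size;
      }
    }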
*** 601,618 ****
      assert(new_limit <= top(), "uninitialized objects in the safe range");
      _concurrent_iteration_safe_limit = new_limit;
    }
  
! #if INCLUDE_ALL_GCS
    // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
      void par_oop_iterate(MemRegion mr, OopClosureType* blk);
  
      ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
! #endif // INCLUDE_ALL_GCS
  
    // Compaction support
    virtual void reset_after_compaction() {
      assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
      set_top(compaction_top());
--- 607,624 ----
      assert(new_limit <= top(), "uninitialized objects in the safe range");
      _concurrent_iteration_safe_limit = new_limit;
    }
  
! #if INCLUDE_CMSGC
    // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
      void par_oop_iterate(MemRegion mr, OopClosureType* blk);
  
      ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
! #endif // INCLUDE_CMSGC
  
    // Compaction support
    virtual void reset_after_compaction() {
      assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
      set_top(compaction_top());
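ContigSpace_PAR_OOP_ITERATE_DECL is applied through an X-macro list, so the single #define above expands into one par_oop_iterate overload per closure type; these declarations are only needed by CMS, which is what the tightened guard (INCLUDE_ALL_GCS to INCLUDE_CMSGC) reflects. A generic illustration of the expansion pattern (the closure list and names below are placeholders, not the real ALL_PAR_OOP_ITERATE_CLOSURES list):

    class MemRegion;                 // declarations only; parameter types may stay incomplete
    class ExampleClosureA;           // placeholder closure types, not HotSpot's list
    class ExampleClosureB;

    // Placeholder X-macro list: applies f to each (closure type, nv_suffix) pair.
    #define EXAMPLE_PAR_OOP_ITERATE_CLOSURES(f) \
      f(ExampleClosureA, _nv)                   \
      f(ExampleClosureB, _v)

    #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
      void par_oop_iterate(MemRegion mr, OopClosureType* blk);

    struct ExampleSpace {
      // Expands into one overloaded par_oop_iterate declaration per closure type:
      EXAMPLE_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
    };

    #undef ContigSpace_PAR_OOP_ITERATE_DECL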
*** 652,663 ****
--- 658,671 ----
    // Addresses for inlined allocation
    HeapWord** top_addr() { return &_top; }
    HeapWord** end_addr() { return &_end; }
  
+ #if INCLUDE_SERIALGC
    // Overrides for more efficient compaction support.
    void prepare_for_compaction(CompactPoint* cp);
+ #endif
  
    virtual void print_on(outputStream* st) const;
  
    // Checked dynamic downcasts.
    virtual ContiguousSpace* toContiguousSpace() {
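Both the pure virtual CompactibleSpace::prepare_for_compaction (earlier hunk) and this ContiguousSpace override now sit under the same guard, so a build without serialgc drops both ends of the virtual dispatch together. A stripped-down model of that relationship (class bodies reduced to the one member; INCLUDE_SERIALGC hard-coded only so the sketch stands alone):

    #define INCLUDE_SERIALGC 1       // stand-in for the value the build system provides

    struct CompactPoint;             // opaque here, as in the real header

    struct CompactibleSpace {
      virtual ~CompactibleSpace() = default;
    #if INCLUDE_SERIALGC
      virtual void prepare_for_compaction(CompactPoint* cp) = 0;   // guarded pure virtual
    #endif
    };

    struct ContiguousSpace : CompactibleSpace {
    #if INCLUDE_SERIALGC
      void prepare_for_compaction(CompactPoint* cp);               // guarded override
    #endif
    };

Guarding both declarations in the same change keeps ContiguousSpace concrete in both configurations; leaving the base declaration unguarded while removing the override would make the class abstract in a build without serialgc.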