
src/hotspot/share/gc/parallel/psParallelCompact.hpp


*** 237,246 **** --- 237,249 ----
    HeapWord* destination() const { return _destination; }
  
    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }
  
+   // Reuse _source_region to store the corresponding shadow region index
+   size_t shadow_region() const { return _source_region; }
+ 
    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase. This is different from _partial_obj_addr, which is an object that
    // extends onto a source region. However, the two uses do not overlap in
    // time, so the same field is used to save space.
*** 305,314 **** --- 308,318 ----
    bool completed() const { return _dc_and_los >= dc_completed; }
  
    // These are not atomic.
    void set_destination(HeapWord* addr) { _destination = addr; }
    void set_source_region(size_t region) { _source_region = region; }
+   void set_shadow_region(size_t region) { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words) {
      _partial_obj_size = (region_sz_t) words;
    }
*** 324,333 **** --- 328,361 ----
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();
  
+   // Possible values of _shadow_state; the transitions are as follows:
+   // Normal path:
+   //   UNUSED -> try_push() -> FINISH
+   // Steal path:
+   //   UNUSED -> try_steal() -> SHADOW -> mark_filled() -> FILLED -> try_copy() -> FINISH
+   static const int UNUSED; // Original state
+   static const int SHADOW; // Stolen by an idle thread, and a shadow region is created for it
+   static const int FILLED; // Its shadow region has been filled and is ready to be copied back
+   static const int FINISH; // Work has been done
+ 
+   // Preempt the region to avoid double processing
+   inline bool try_push();
+   inline bool try_steal();
+   // Mark the region as filled and ready to be copied back
+   inline void mark_filled();
+   // Preempt the region to copy the shadow region content back
+   inline bool try_copy();
+   // Special case: see the comment in PSParallelCompact::fill_shadow_region.
+   // Return to the normal path here
+   inline void mark_normal();
+ 
+ 
+   int shadow_state() { return _shadow_state; }
+ 
  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;
  
    // Constants for manipulating the _dc_and_los field, which holds both the
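
To make the state machine concrete, the following minimal sketch (not part of the patch) shows how the two paths are meant to interleave. Only the RegionData members come from the diff above; region_idx, fill_in_place(), fill_shadow_copy() and copy_back_shadow() are hypothetical stand-ins for the real filling code:

    // Owner thread (normal path):
    if (region_ptr->try_push()) {            // UNUSED -> FINISH
      fill_in_place(region_idx);             // work happens directly in the heap region
    } else if (region_ptr->try_copy()) {     // FILLED -> FINISH
      copy_back_shadow(region_idx);          // an idle thread filled a shadow copy first
    }

    // Idle thread (steal path):
    if (region_ptr->try_steal()) {           // UNUSED -> SHADOW
      fill_shadow_copy(region_idx);          // fill a spare region instead of the destination
      region_ptr->mark_filled();             // SHADOW -> FILLED, publishes the copy
    }
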
*** 344,353 **** --- 372,382 ----
    size_t _source_region;
    HeapWord* _partial_obj_addr;
    region_sz_t _partial_obj_size;
    region_sz_t volatile _dc_and_los;
    bool volatile _blocks_filled;
+   int volatile _shadow_state;
  
  #ifdef ASSERT
    size_t _blocks_filled_count; // Number of block table fills.
  
    // These enable optimizations that are only partially implemented. Use
*** 594,603 **** --- 623,654 ----
    const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
    const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
    return old == los;
  }
  
+ inline bool ParallelCompactData::RegionData::try_push() {
+   return Atomic::cmpxchg(FINISH, &_shadow_state, UNUSED) == UNUSED;
+ }
+ 
+ inline bool ParallelCompactData::RegionData::try_steal() {
+   return Atomic::cmpxchg(SHADOW, &_shadow_state, UNUSED) == UNUSED;
+ }
+ 
+ inline void ParallelCompactData::RegionData::mark_filled() {
+   int old = Atomic::cmpxchg(FILLED, &_shadow_state, SHADOW);
+   assert(old == SHADOW, "Failed to mark the region as filled");
+ }
+ 
+ inline bool ParallelCompactData::RegionData::try_copy() {
+   return Atomic::cmpxchg(FINISH, &_shadow_state, FILLED) == FILLED;
+ }
+ 
+ inline void ParallelCompactData::RegionData::mark_normal() {
+   int old = Atomic::cmpxchg(FINISH, &_shadow_state, SHADOW);
+   assert(old == SHADOW, "Failed to mark the region as FINISH");
+ }
+ 
  inline ParallelCompactData::RegionData*
  ParallelCompactData::region(size_t region_idx) const
  {
    assert(region_idx <= region_count(), "bad arg");
    return _region_data + region_idx;
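
For readers unfamiliar with HotSpot's Atomic API: Atomic::cmpxchg(new_value, dest, compare_value) returns the value previously at dest, so each transition above succeeds for exactly one thread. The standalone sketch below mirrors the same pattern with std::atomic; the concrete constant values are assumptions, since the patch only declares UNUSED/SHADOW/FILLED/FINISH in this file:

    #include <atomic>
    #include <cassert>

    struct ShadowState {
      // Values are illustrative assumptions; the patch does not define them here.
      enum { UNUSED = 0, SHADOW = 1, FILLED = 2, FINISH = 3 };
      std::atomic<int> _state{UNUSED};

      // Each transition is a single CAS, so at most one thread wins each edge.
      bool try_push()  { int e = UNUSED; return _state.compare_exchange_strong(e, FINISH); }
      bool try_steal() { int e = UNUSED; return _state.compare_exchange_strong(e, SHADOW); }
      bool try_copy()  { int e = FILLED; return _state.compare_exchange_strong(e, FINISH); }
      void mark_filled() {
        int e = SHADOW;
        bool ok = _state.compare_exchange_strong(e, FILLED);
        assert(ok && "only the stealing thread may mark FILLED");
      }
    };
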
*** 1177,1191 ****
    static void decrement_destination_counts(ParCompactionManager* cm,
                                             SpaceId src_space_id,
                                             size_t beg_region,
                                             HeapWord* end_addr);
  
!   // Fill a region, copying objects from one or more source regions.
!   static void fill_region(ParCompactionManager* cm, size_t region_idx);
!   static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
!     fill_region(cm, region);
    }
  
    // Fill in the block table for the specified region.
    static void fill_blocks(size_t region_idx);
  
    // Update the deferred objects in the space.
--- 1228,1251 ----
    static void decrement_destination_counts(ParCompactionManager* cm,
                                             SpaceId src_space_id,
                                             size_t beg_region,
                                             HeapWord* end_addr);
  
!   static void fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region);
!   static void fill_and_update_region(ParCompactionManager* cm, size_t region);
! 
!   static bool steal_shadow_region(ParCompactionManager* cm, size_t& region_idx);
!   static void fill_shadow_region(ParCompactionManager* cm, size_t region_idx);
!   static void fill_and_update_shadow_region(ParCompactionManager* cm, size_t region) {
!     fill_shadow_region(cm, region);
    }
+   // Copy the content of a shadow region back to its corresponding heap region
+   static void copy_back(HeapWord* shadow_addr, HeapWord* region_addr);
+   // Initialize the steal record of a GC thread
+   static void initialize_steal_record(uint which);
+   // Reuse the empty heap regions as shadow regions, like to-space regions
+   static void enqueue_shadow_region();
  
    // Fill in the block table for the specified region.
    static void fill_blocks(size_t region_idx);
  
    // Update the deferred objects in the space.
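
Taken together, these declarations suggest the following division of labor during the compaction phase. This is an illustrative flow, not code from the patch; the loop shape and the surrounding comments are assumptions based on the names above:

    // Idle GC thread: steal unclaimed regions and fill shadow copies.
    size_t region_idx;
    while (PSParallelCompact::steal_shadow_region(cm, region_idx)) {
      // Fills a spare ("shadow") region with the compacted contents destined
      // for region_idx and flips its state SHADOW -> FILLED.
      PSParallelCompact::fill_and_update_shadow_region(cm, region_idx);
    }

    // Owner thread: on finding a FILLED region, copy the shadow content back
    // into the real destination via
    //   PSParallelCompact::copy_back(shadow_addr, region_addr);
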
*** 1228,1287 ****
    static void verify_complete(SpaceId space_id);
  #endif  // #ifdef ASSERT
  };
  
  class MoveAndUpdateClosure: public ParMarkBitMapClosure {
   public:
    inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
!                               ObjectStartArray* start_array,
!                               HeapWord* destination, size_t words);
  
    // Accessors.
    HeapWord* destination() const { return _destination; }
  
    // If the object will fit (size <= words_remaining()), copy it to the current
    // destination, update the interior oops and the start array and return either
    // full (if the closure is full) or incomplete. If the object will not fit,
    // return would_overflow.
!   virtual IterationStatus do_addr(HeapWord* addr, size_t size);
  
    // Copy enough words to fill this closure, starting at source(). Interior
    // oops and the start array are not updated. Return full.
    IterationStatus copy_until_full();
  
    // Copy enough words to fill this closure or to the end of an object,
    // whichever is smaller, starting at source(). Interior oops and the start
    // array are not updated.
    void copy_partial_obj();
  
!  protected:
    // Update variables to indicate that word_count words were processed.
    inline void update_state(size_t word_count);
  
   protected:
-   ObjectStartArray* const _start_array;
    HeapWord* _destination; // Next addr to be written.
  };
  
  inline
  MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                             ParCompactionManager* cm,
!                                            ObjectStartArray* start_array,
!                                            HeapWord* destination,
!                                            size_t words) :
!   ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
! {
!   _destination = destination;
! }
  
  inline void MoveAndUpdateClosure::update_state(size_t words)
  {
    decrement_words_remaining(words);
    _source += words;
    _destination += words;
  }
  
  class UpdateOnlyClosure: public ParMarkBitMapClosure {
   private:
    const PSParallelCompact::SpaceId _space_id;
    ObjectStartArray* const _start_array;
--- 1288,1390 ----
    static void verify_complete(SpaceId space_id);
  #endif  // #ifdef ASSERT
  };
  
  class MoveAndUpdateClosure: public ParMarkBitMapClosure {
+   static inline size_t calculate_words_remaining(size_t region);
   public:
    inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
!                               size_t region);
  
    // Accessors.
    HeapWord* destination() const { return _destination; }
+   HeapWord* copy_destination() const { return _destination + _offset; }
  
    // If the object will fit (size <= words_remaining()), copy it to the current
    // destination, update the interior oops and the start array and return either
    // full (if the closure is full) or incomplete. If the object will not fit,
    // return would_overflow.
!   IterationStatus do_addr(HeapWord* addr, size_t size);
  
    // Copy enough words to fill this closure, starting at source(). Interior
    // oops and the start array are not updated. Return full.
    IterationStatus copy_until_full();
  
    // Copy enough words to fill this closure or to the end of an object,
    // whichever is smaller, starting at source(). Interior oops and the start
    // array are not updated.
    void copy_partial_obj();
  
!   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
!                                PSParallelCompact::RegionData* region_ptr);
! 
!  protected:
    // Update variables to indicate that word_count words were processed.
    inline void update_state(size_t word_count);
  
   protected:
    HeapWord* _destination; // Next addr to be written.
+   ObjectStartArray* const _start_array;
+   size_t _offset;
  };
  
+ inline size_t MoveAndUpdateClosure::calculate_words_remaining(size_t region) {
+   HeapWord* dest_addr = PSParallelCompact::summary_data().region_to_addr(region);
+   PSParallelCompact::SpaceId dest_space_id = PSParallelCompact::space_id(dest_addr);
+   HeapWord* new_top = PSParallelCompact::new_top(dest_space_id);
+   assert(dest_addr < new_top, "sanity");
+ 
+   return MIN2(pointer_delta(new_top, dest_addr), ParallelCompactData::RegionSize);
+ }
+ 
  inline
  MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                             ParCompactionManager* cm,
!                                            size_t region_idx) :
!   ParMarkBitMapClosure(bitmap, cm, calculate_words_remaining(region_idx)),
!   _destination(PSParallelCompact::summary_data().region_to_addr(region_idx)),
!   _start_array(PSParallelCompact::start_array(PSParallelCompact::space_id(_destination))),
!   _offset(0) { }
! 
  inline void MoveAndUpdateClosure::update_state(size_t words)
  {
    decrement_words_remaining(words);
    _source += words;
    _destination += words;
  }
  
+ class ShadowClosure: public MoveAndUpdateClosure {
+   inline size_t calculate_shadow_offset(size_t region_idx, size_t shadow_idx);
+  public:
+   inline ShadowClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
+                        size_t region, size_t shadow);
+ 
+   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
+                                PSParallelCompact::RegionData* region_ptr);
+ 
+  private:
+   size_t _shadow;
+ };
+ 
+ inline size_t ShadowClosure::calculate_shadow_offset(size_t region_idx, size_t shadow_idx) {
+   ParallelCompactData& sd = PSParallelCompact::summary_data();
+   HeapWord* dest_addr = sd.region_to_addr(region_idx);
+   HeapWord* shadow_addr = sd.region_to_addr(shadow_idx);
+   return pointer_delta(shadow_addr, dest_addr);
+ }
+ 
+ inline
+ ShadowClosure::ShadowClosure(ParMarkBitMap* bitmap,
+                              ParCompactionManager* cm,
+                              size_t region,
+                              size_t shadow) :
+   MoveAndUpdateClosure(bitmap, cm, region),
+   _shadow(shadow) {
+   _offset = calculate_shadow_offset(region, shadow);
+ }
+ 
  class UpdateOnlyClosure: public ParMarkBitMapClosure {
   private:
    const PSParallelCompact::SpaceId _space_id;
    ObjectStartArray* const _start_array;
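
The _offset field is what lets ShadowClosure reuse MoveAndUpdateClosure unchanged: writes go through copy_destination(), i.e. _destination + _offset, so only the write target is redirected, while destination() still reports the real (eventual) address. A worked example with invented numbers:

    // Assume RegionSize = 2048 words and region_to_addr(r) = base + r * RegionSize
    // (both region indices below are made up). For destination region 10 stolen
    // into shadow region 42:
    //   _destination = base + 10 * 2048
    //   _offset      = pointer_delta(base + 42 * 2048, base + 10 * 2048)
    //                = 32 * 2048 words
    // copy_destination() = base + 42 * 2048  -> writes land in the shadow region
    // destination()      = base + 10 * 2048  -> the final address in the heap
    // copy_back() later moves the 2048-word block from region 42 to region 10.
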