
src/hotspot/share/gc/parallel/psParallelCompact.hpp (old version)

 222   static const size_t Log2BlockSize;
 223   static const size_t BlockSize;
 224   static const size_t BlockSizeBytes;
 225 
 226   static const size_t BlockSizeOffsetMask;
 227   static const size_t BlockAddrOffsetMask;
 228   static const size_t BlockAddrMask;
 229 
 230   static const size_t BlocksPerRegion;
 231   static const size_t Log2BlocksPerRegion;
 232 
 233   class RegionData
 234   {
 235   public:
 236     // Destination address of the region.
 237     HeapWord* destination() const { return _destination; }
 238 
 239     // The first region containing data destined for this region.
 240     size_t source_region() const { return _source_region; }
 241 
 242     // The object (if any) starting in this region and ending in a different
 243     // region that could not be updated during the main (parallel) compaction
 244     // phase.  This is different from _partial_obj_addr, which is an object that
 245     // extends onto a source region.  However, the two uses do not overlap in
 246     // time, so the same field is used to save space.
 247     HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
 248 
 249     // The starting address of the partial object extending onto the region.
 250     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
 251 
 252     // Size of the partial object extending onto the region (words).
 253     size_t partial_obj_size() const { return _partial_obj_size; }
 254 
 255     // Size of live data that lies within this region due to objects that start
 256     // in this region (words).  This does not include the partial object
 257     // extending onto the region (if any), or the part of an object that extends
 258     // onto the next region (if any).
 259     size_t live_obj_size() const { return _dc_and_los & los_mask; }
 260 
 261     // Total live data that lies within the region (words).


 290 
 291     // The location of the java heap data that corresponds to this region.
 292     inline HeapWord* data_location() const;
 293 
 294     // The highest address referenced by objects in this region.
 295     inline HeapWord* highest_ref() const;
 296 
 297     // Whether this region is available to be claimed, has been claimed, or has
 298     // been completed.
 299     //
 300     // Minor subtlety:  claimed() returns true if the region is marked
 301     // completed(), which is desirable since a region must be claimed before it
 302     // can be completed.
 303     bool available() const { return _dc_and_los < dc_one; }
 304     bool claimed()   const { return _dc_and_los >= dc_claimed; }
 305     bool completed() const { return _dc_and_los >= dc_completed; }
 306 
 307     // These are not atomic.
 308     void set_destination(HeapWord* addr)       { _destination = addr; }
 309     void set_source_region(size_t region)      { _source_region = region; }

 310     void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
 311     void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
 312     void set_partial_obj_size(size_t words)    {
 313       _partial_obj_size = (region_sz_t) words;
 314     }
 315     inline void set_blocks_filled();
 316 
 317     inline void set_destination_count(uint count);
 318     inline void set_live_obj_size(size_t words);
 319     inline void set_data_location(HeapWord* addr);
 320     inline void set_completed();
 321     inline bool claim_unsafe();
 322 
 323     // These are atomic.
 324     inline void add_live_obj(size_t words);
 325     inline void set_highest_ref(HeapWord* addr);
 326     inline void decrement_destination_count();
 327     inline bool claim();
 328 
 329   private:
 330     // The type used to represent object sizes within a region.
 331     typedef uint region_sz_t;
 332 
 333     // Constants for manipulating the _dc_and_los field, which holds both the
 334     // destination count and live obj size.  The live obj size lives at the
 335     // least significant end so no masking is necessary when adding.
 336     static const region_sz_t dc_shift;           // Shift amount.
 337     static const region_sz_t dc_mask;            // Mask for destination count.
 338     static const region_sz_t dc_one;             // 1, shifted appropriately.
 339     static const region_sz_t dc_claimed;         // Region has been claimed.
 340     static const region_sz_t dc_completed;       // Region has been completed.
 341     static const region_sz_t los_mask;           // Mask for live obj size.
 342 
 343     HeapWord*            _destination;
 344     size_t               _source_region;
 345     HeapWord*            _partial_obj_addr;
 346     region_sz_t          _partial_obj_size;
 347     region_sz_t volatile _dc_and_los;
 348     bool        volatile _blocks_filled;
 349 
 350 #ifdef ASSERT
 351     size_t               _blocks_filled_count;   // Number of block table fills.
 352 
 353     // These enable optimizations that are only partially implemented.  Use
 354     // debug builds to prevent the code fragments from breaking.
 355     HeapWord*            _data_location;
 356     HeapWord*            _highest_ref;
 357 #endif  // #ifdef ASSERT
 358 
 359 #ifdef ASSERT
 360    public:
 361     uint                 _pushed;   // 0 until region is pushed onto a stack
 362    private:
 363 #endif
 364   };
 365 
 366   // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
 367   // holds an offset, which is the amount of live data in the Region to the left
 368   // of the first live object that starts in the Block.
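
An editorial aside, not part of this change: the role of the per-Block offset
can be pictured with a small sketch.  To find the amount of live data that
precedes an address, only the bitmap range inside one Block needs scanning,
because everything to the Block's left is pre-summed in its offset.  The
helpers addr_to_block_idx, block_start and live_words_in_range below are
hypothetical stand-ins, not the real API:

    // Sketch only: live words in the Region that precede addr.
    size_t live_words_before(HeapWord* addr) {
      const size_t block_idx = addr_to_block_idx(addr);    // Block holding addr
      const size_t presummed = block(block_idx)->offset(); // live words to the
                                                           // left of this Block
      // Scan at most one Block's worth of bitmap instead of a whole Region.
      return presummed + live_words_in_range(block_start(block_idx), addr);
    }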


 579   Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
 580 }
 581 
 582 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
 583 {
 584 #ifdef ASSERT
 585   HeapWord* tmp = _highest_ref;
 586   while (addr > tmp) {
 587     tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
 588   }
 589 #endif  // #ifdef ASSERT
 590 }
 591 
 592 inline bool ParallelCompactData::RegionData::claim()
 593 {
 594   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
 595   const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
 596   return old == los;
 597 }
 598 
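
A standalone illustration of how available(), claimed() and completed() fall
out of plain unsigned comparisons on _dc_and_los.  The shift and sentinel
values below are assumptions made for the sketch; the real constants are
defined elsewhere (in psParallelCompact.cpp) and may differ.  Only the
ordering dc_one <= dc_claimed < dc_completed matters:

    // Hedged, self-contained model of the _dc_and_los encoding.
    #include <cassert>
    #include <cstdint>

    typedef uint32_t region_sz_t;
    const region_sz_t dc_shift     = 24;                            // assumed
    const region_sz_t los_mask     = (region_sz_t(1) << dc_shift) - 1;
    const region_sz_t dc_one       = region_sz_t(1) << dc_shift;
    const region_sz_t dc_claimed   = region_sz_t(0x8) << dc_shift;  // assumed
    const region_sz_t dc_completed = region_sz_t(0xc) << dc_shift;  // assumed

    int main() {
      region_sz_t dc_and_los = 1000;           // live obj size only
      assert(dc_and_los < dc_one);             // available()
      dc_and_los = dc_claimed | (dc_and_los & los_mask);   // claim()
      assert(dc_and_los >= dc_claimed);        // claimed()
      dc_and_los = dc_completed | (dc_and_los & los_mask);
      // completed() implies claimed(): dc_completed >= dc_claimed, so a
      // completed region still reports claimed(), as the comment above notes.
      assert(dc_and_los >= dc_completed && dc_and_los >= dc_claimed);
      return 0;
    }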
 599 inline ParallelCompactData::RegionData*
 600 ParallelCompactData::region(size_t region_idx) const
 601 {
 602   assert(region_idx <= region_count(), "bad arg");
 603   return _region_data + region_idx;
 604 }
 605 
 606 inline size_t
 607 ParallelCompactData::region(const RegionData* const region_ptr) const
 608 {
 609   assert(region_ptr >= _region_data, "bad arg");
 610   assert(region_ptr <= _region_data + region_count(), "bad arg");
 611   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 612 }
 613 
 614 inline ParallelCompactData::BlockData*
 615 ParallelCompactData::block(size_t n) const {
 616   assert(n < block_count(), "bad arg");
 617   return _block_data + n;
 618 }


1162                                   size_t src_region_idx);
1163 
1164   // Determine the next source region, set closure.source() to the start of the
1165   // new region, and return the region index.  Parameter end_addr is the address
1166   // one beyond the end of the source range just processed.  If necessary, switch
1167   // to a new source space and set src_space_id (in-out parameter) and
1168   // src_space_top (out parameter) accordingly.
1169   static size_t next_src_region(MoveAndUpdateClosure& closure,
1170                                 SpaceId& src_space_id,
1171                                 HeapWord*& src_space_top,
1172                                 HeapWord* end_addr);
1173 
1174   // Decrement the destination count for each non-empty source region in the
1175   // range [beg_region, region(region_align_up(end_addr))).  If the destination
1176   // count for a region goes to 0 and it needs to be filled, enqueue it.
1177   static void decrement_destination_counts(ParCompactionManager* cm,
1178                                            SpaceId src_space_id,
1179                                            size_t beg_region,
1180                                            HeapWord* end_addr);
1181 
1182   // Fill a region, copying objects from one or more source regions.
1183   static void fill_region(ParCompactionManager* cm, size_t region_idx);
1184   static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1185     fill_region(cm, region);
1186   }
1187 
1188   // Fill in the block table for the specified region.
1189   static void fill_blocks(size_t region_idx);
1190 
1191   // Update the deferred objects in the space.
1192   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1193 
1194   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1195   static ParallelCompactData& summary_data() { return _summary_data; }
1196 
1197   // Reference Processing
1198   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1199 
1200   static STWGCTimer* gc_timer() { return &_gc_timer; }
1201 
1202   // Return the SpaceId for the given address.
1203   static SpaceId space_id(HeapWord* addr);
1204 
1205   // Time since last full gc (in milliseconds).
1206   static jlong millis_since_last_gc();


1213   static void print_region_ranges();
1214   static void print_dense_prefix_stats(const char* const algorithm,
1215                                        const SpaceId id,
1216                                        const bool maximum_compaction,
1217                                        HeapWord* const addr);
1218   static void summary_phase_msg(SpaceId dst_space_id,
1219                                 HeapWord* dst_beg, HeapWord* dst_end,
1220                                 SpaceId src_space_id,
1221                                 HeapWord* src_beg, HeapWord* src_end);
1222 #endif  // #ifndef PRODUCT
1223 
1224 #ifdef  ASSERT
1225   // Sanity check the new location of a word in the heap.
1226   static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1227   // Verify that all the regions have been emptied.
1228   static void verify_complete(SpaceId space_id);
1229 #endif  // #ifdef ASSERT
1230 };
1231 
1232 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
1233  public:
1234   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1235                               ObjectStartArray* start_array,
1236                               HeapWord* destination, size_t words);
1237 
1238   // Accessors.
1239   HeapWord* destination() const         { return _destination; }
1240 
1241   // If the object will fit (size <= words_remaining()), copy it to the current
1242   // destination, update the interior oops and the start array, and return either
1243   // full (if the closure is full) or incomplete.  If the object will not fit,
1244   // return would_overflow.
1245   virtual IterationStatus do_addr(HeapWord* addr, size_t size);
1246 
1247   // Copy enough words to fill this closure, starting at source().  Interior
1248   // oops and the start array are not updated.  Return full.
1249   IterationStatus copy_until_full();
1250 
1251   // Copy enough words to fill this closure or to the end of an object,
1252   // whichever is smaller, starting at source().  Interior oops and the start
1253   // array are not updated.
1254   void copy_partial_obj();
1255 
1256  protected:
1257   // Update variables to indicate that word_count words were processed.
1258   inline void update_state(size_t word_count);
1259 
1260  protected:
1261   ObjectStartArray* const _start_array;
1262   HeapWord*               _destination;         // Next addr to be written.
1263 };
1264 
1265 inline
1266 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1267                                            ParCompactionManager* cm,
1268                                            ObjectStartArray* start_array,
1269                                            HeapWord* destination,
1270                                            size_t words) :
1271   ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
1272 {
1273   _destination = destination;
1274 }
1275 
1276 inline void MoveAndUpdateClosure::update_state(size_t words)
1277 {
1278   decrement_words_remaining(words);
1279   _source += words;
1280   _destination += words;
1281 }
1282 
1283 class UpdateOnlyClosure: public ParMarkBitMapClosure {
1284  private:
1285   const PSParallelCompact::SpaceId _space_id;
1286   ObjectStartArray* const          _start_array;
1287 
1288  public:
1289   UpdateOnlyClosure(ParMarkBitMap* mbm,
1290                     ParCompactionManager* cm,
1291                     PSParallelCompact::SpaceId space_id);
1292 
1293   // Update the object.
1294   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1295 
1296   inline void do_addr(HeapWord* addr);
1297 };
1298 
1299 class FillClosure: public ParMarkBitMapClosure {
1300  public:

src/hotspot/share/gc/parallel/psParallelCompact.hpp (new version)

 222   static const size_t Log2BlockSize;
 223   static const size_t BlockSize;
 224   static const size_t BlockSizeBytes;
 225 
 226   static const size_t BlockSizeOffsetMask;
 227   static const size_t BlockAddrOffsetMask;
 228   static const size_t BlockAddrMask;
 229 
 230   static const size_t BlocksPerRegion;
 231   static const size_t Log2BlocksPerRegion;
 232 
 233   class RegionData
 234   {
 235   public:
 236     // Destination address of the region.
 237     HeapWord* destination() const { return _destination; }
 238 
 239     // The first region containing data destined for this region.
 240     size_t source_region() const { return _source_region; }
 241 
 242     // Reuse _source_region to store the corresponding shadow region index
 243     size_t shadow_region() const { return _source_region; }
 244 
 245     // The object (if any) starting in this region and ending in a different
 246     // region that could not be updated during the main (parallel) compaction
 247     // phase.  This is different from _partial_obj_addr, which is an object that
 248     // extends onto a source region.  However, the two uses do not overlap in
 249     // time, so the same field is used to save space.
 250     HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
 251 
 252     // The starting address of the partial object extending onto the region.
 253     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
 254 
 255     // Size of the partial object extending onto the region (words).
 256     size_t partial_obj_size() const { return _partial_obj_size; }
 257 
 258     // Size of live data that lies within this region due to objects that start
 259     // in this region (words).  This does not include the partial object
 260     // extending onto the region (if any), or the part of an object that extends
 261     // onto the next region (if any).
 262     size_t live_obj_size() const { return _dc_and_los & los_mask; }
 263 
 264     // Total live data that lies within the region (words).


 293 
 294     // The location of the java heap data that corresponds to this region.
 295     inline HeapWord* data_location() const;
 296 
 297     // The highest address referenced by objects in this region.
 298     inline HeapWord* highest_ref() const;
 299 
 300     // Whether this region is available to be claimed, has been claimed, or has
 301     // been completed.
 302     //
 303     // Minor subtlety:  claimed() returns true if the region is marked
 304     // completed(), which is desirable since a region must be claimed before it
 305     // can be completed.
 306     bool available() const { return _dc_and_los < dc_one; }
 307     bool claimed()   const { return _dc_and_los >= dc_claimed; }
 308     bool completed() const { return _dc_and_los >= dc_completed; }
 309 
 310     // These are not atomic.
 311     void set_destination(HeapWord* addr)       { _destination = addr; }
 312     void set_source_region(size_t region)      { _source_region = region; }
 313     void set_shadow_region(size_t region)      { _source_region = region; }
 314     void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
 315     void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
 316     void set_partial_obj_size(size_t words)    {
 317       _partial_obj_size = (region_sz_t) words;
 318     }
 319     inline void set_blocks_filled();
 320 
 321     inline void set_destination_count(uint count);
 322     inline void set_live_obj_size(size_t words);
 323     inline void set_data_location(HeapWord* addr);
 324     inline void set_completed();
 325     inline bool claim_unsafe();
 326 
 327     // These are atomic.
 328     inline void add_live_obj(size_t words);
 329     inline void set_highest_ref(HeapWord* addr);
 330     inline void decrement_destination_count();
 331     inline bool claim();
 332 
 333     // Possible values of _shadow_state; transitions are as follows:
 334     // Normal path:
 335     // UNUSED -> try_push() -> FINISH
 336     // Steal  path:
 337     // UNUSED -> try_steal() -> SHADOW -> mark_filled() -> FILLED -> try_copy() -> FINISH
 338     static const int UNUSED;                     // Original state
 339     static const int SHADOW;                     // Stolen by an idle thread; a shadow region is created for it
 340     static const int FILLED;                     // Its shadow region has been filled and is ready to be copied back
 341     static const int FINISH;                     // Work has been done
 342 
 343     // Preempt the region to avoid double processing
 344     inline bool try_push();
 345     inline bool try_steal();
 346     // Mark the region as filled and ready to be copied back
 347     inline void mark_filled();
 348     // Preempt the region to copy the shadow region content back
 349     inline bool try_copy();
 350     // Special case: see the comment in PSParallelCompact::fill_shadow_region.
 351     // Return to the normal path here
 352     inline void mark_normal();
 353 
 354 
 355     int shadow_state() { return _shadow_state; }
 356 
 357   private:
 358     // The type used to represent object sizes within a region.
 359     typedef uint region_sz_t;
 360 
 361     // Constants for manipulating the _dc_and_los field, which holds both the
 362     // destination count and live obj size.  The live obj size lives at the
 363     // least significant end so no masking is necessary when adding.
 364     static const region_sz_t dc_shift;           // Shift amount.
 365     static const region_sz_t dc_mask;            // Mask for destination count.
 366     static const region_sz_t dc_one;             // 1, shifted appropriately.
 367     static const region_sz_t dc_claimed;         // Region has been claimed.
 368     static const region_sz_t dc_completed;       // Region has been completed.
 369     static const region_sz_t los_mask;           // Mask for live obj size.
 370 
 371     HeapWord*            _destination;
 372     size_t               _source_region;
 373     HeapWord*            _partial_obj_addr;
 374     region_sz_t          _partial_obj_size;
 375     region_sz_t volatile _dc_and_los;
 376     bool        volatile _blocks_filled;
 377     int         volatile _shadow_state;
 378 
 379 #ifdef ASSERT
 380     size_t               _blocks_filled_count;   // Number of block table fills.
 381 
 382     // These enable optimizations that are only partially implemented.  Use
 383     // debug builds to prevent the code fragments from breaking.
 384     HeapWord*            _data_location;
 385     HeapWord*            _highest_ref;
 386 #endif  // #ifdef ASSERT
 387 
 388 #ifdef ASSERT
 389    public:
 390     uint                 _pushed;   // 0 until region is pushed onto a stack
 391    private:
 392 #endif
 393   };
 394 
 395   // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
 396   // holds an offset, which is the amount of live data in the Region to the left
 397   // of the first live object that starts in the Block.


 608   Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
 609 }
 610 
 611 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
 612 {
 613 #ifdef ASSERT
 614   HeapWord* tmp = _highest_ref;
 615   while (addr > tmp) {
 616     tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
 617   }
 618 #endif  // #ifdef ASSERT
 619 }
 620 
 621 inline bool ParallelCompactData::RegionData::claim()
 622 {
 623   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
 624   const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
 625   return old == los;
 626 }
 627 
 628 inline bool ParallelCompactData::RegionData::try_push() {
 629   return Atomic::cmpxchg(FINISH, &_shadow_state, UNUSED) == UNUSED;
 630 }
 631 
 632 inline bool ParallelCompactData::RegionData::try_steal() {
 633   return Atomic::cmpxchg(SHADOW, &_shadow_state, UNUSED) == UNUSED;
 634 }
 635 
 636 inline void ParallelCompactData::RegionData::mark_filled() {
 637   int old = Atomic::cmpxchg(FILLED, &_shadow_state, SHADOW);
 638   assert(old == SHADOW, "Failed to mark the region as filled");
 639 }
 640 
 641 inline bool ParallelCompactData::RegionData::try_copy() {
 642   return Atomic::cmpxchg(FINISH, &_shadow_state, FILLED) == FILLED;
 643 }
 644 
 645 inline void ParallelCompactData::RegionData::mark_normal() {
 646   _shadow_state = FINISH;
 647 }
 648 
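
To see how these transitions compose, here is a hedged sketch of the steal
path from an idle thread's point of view.  The control flow is illustrative
only: the real drivers live in psParallelCompact.cpp, and exactly where
mark_filled() and copy_back() are invoked is simplified here:

    // Sketch only: one region travelling the steal path of the diagram
    // UNUSED -> SHADOW -> FILLED -> FINISH.
    void steal_path_sketch(ParCompactionManager* cm, size_t region_idx) {
      ParallelCompactData::RegionData* const region_ptr =
        PSParallelCompact::summary_data().region(region_idx);
      if (region_ptr->try_steal()) {                 // UNUSED -> SHADOW
        // Fill a shadow region on behalf of the (not yet writable)
        // destination region; mark_filled() flips SHADOW -> FILLED once
        // the shadow copy is complete.
        PSParallelCompact::fill_and_update_shadow_region(cm, region_idx);
        // Whichever thread wins try_copy() (FILLED -> FINISH) copies the
        // shadow content back into the destination region.
        if (region_ptr->try_copy()) {
          // copy_back(shadow_addr, region_addr) would run here.
        }
      }
    }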
 649 inline ParallelCompactData::RegionData*
 650 ParallelCompactData::region(size_t region_idx) const
 651 {
 652   assert(region_idx <= region_count(), "bad arg");
 653   return _region_data + region_idx;
 654 }
 655 
 656 inline size_t
 657 ParallelCompactData::region(const RegionData* const region_ptr) const
 658 {
 659   assert(region_ptr >= _region_data, "bad arg");
 660   assert(region_ptr <= _region_data + region_count(), "bad arg");
 661   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 662 }
 663 
 664 inline ParallelCompactData::BlockData*
 665 ParallelCompactData::block(size_t n) const {
 666   assert(n < block_count(), "bad arg");
 667   return _block_data + n;
 668 }


1212                                   size_t src_region_idx);
1213 
1214   // Determine the next source region, set closure.source() to the start of the
1215   // new region, and return the region index.  Parameter end_addr is the address
1216   // one beyond the end of the source range just processed.  If necessary, switch
1217   // to a new source space and set src_space_id (in-out parameter) and
1218   // src_space_top (out parameter) accordingly.
1219   static size_t next_src_region(MoveAndUpdateClosure& closure,
1220                                 SpaceId& src_space_id,
1221                                 HeapWord*& src_space_top,
1222                                 HeapWord* end_addr);
1223 
1224   // Decrement the destination count for each non-empty source region in the
1225   // range [beg_region, region(region_align_up(end_addr))).  If the destination
1226   // count for a region goes to 0 and it needs to be filled, enqueue it.
1227   static void decrement_destination_counts(ParCompactionManager* cm,
1228                                            SpaceId src_space_id,
1229                                            size_t beg_region,
1230                                            HeapWord* end_addr);
1231 
1232   static void fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region);
1233   static void fill_and_update_region(ParCompactionManager* cm, size_t region);
1234 
1235   static bool steal_shadow_region(ParCompactionManager* cm, size_t& region_idx);
1236   static void fill_shadow_region(ParCompactionManager* cm, size_t region_idx);
1237   static void fill_and_update_shadow_region(ParCompactionManager* cm, size_t region) {
1238     fill_shadow_region(cm, region);
1239   }
1240   // Copy the content of a shadow region back to its corresponding heap region
1241   static void copy_back(HeapWord* shadow_addr, HeapWord* region_addr);
1242   // Initialize the steal record of a GC thread
1243   static void initialize_steal_record(uint which);
1244   // Reuse the empty heap regions as shadow regions; they act like to-space regions
1245   static void enqueue_shadow_region();
1246 
1247   // Fill in the block table for the specified region.
1248   static void fill_blocks(size_t region_idx);
1249 
1250   // Update the deferred objects in the space.
1251   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1252 
1253   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1254   static ParallelCompactData& summary_data() { return _summary_data; }
1255 
1256   // Reference Processing
1257   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1258 
1259   static STWGCTimer* gc_timer() { return &_gc_timer; }
1260 
1261   // Return the SpaceId for the given address.
1262   static SpaceId space_id(HeapWord* addr);
1263 
1264   // Time since last full gc (in milliseconds).
1265   static jlong millis_since_last_gc();


1272   static void print_region_ranges();
1273   static void print_dense_prefix_stats(const char* const algorithm,
1274                                        const SpaceId id,
1275                                        const bool maximum_compaction,
1276                                        HeapWord* const addr);
1277   static void summary_phase_msg(SpaceId dst_space_id,
1278                                 HeapWord* dst_beg, HeapWord* dst_end,
1279                                 SpaceId src_space_id,
1280                                 HeapWord* src_beg, HeapWord* src_end);
1281 #endif  // #ifndef PRODUCT
1282 
1283 #ifdef  ASSERT
1284   // Sanity check the new location of a word in the heap.
1285   static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1286   // Verify that all the regions have been emptied.
1287   static void verify_complete(SpaceId space_id);
1288 #endif  // #ifdef ASSERT
1289 };
1290 
1291 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
1292   static inline size_t calculate_words_remaining(size_t region);
1293  public:
1294   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1295                               size_t region);
1296 
1297   // Accessors.
1298   HeapWord* destination() const         { return _destination; }
1299   HeapWord* copy_destination() const    { return _destination + _offset; }
1300 
1301   // If the object will fit (size <= words_remaining()), copy it to the current
1302   // destination, update the interior oops and the start array, and return either
1303   // full (if the closure is full) or incomplete.  If the object will not fit,
1304   // return would_overflow.
1305   IterationStatus do_addr(HeapWord* addr, size_t size);
1306 
1307   // Copy enough words to fill this closure, starting at source().  Interior
1308   // oops and the start array are not updated.  Return full.
1309   IterationStatus copy_until_full();
1310 
1311   // Copy enough words to fill this closure or to the end of an object,
1312   // whichever is smaller, starting at source().  Interior oops and the start
1313   // array are not updated.
1314   void copy_partial_obj();
1315 
1316   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
1317                                PSParallelCompact::RegionData* region_ptr);
1318 
1319 protected:
1320   // Update variables to indicate that word_count words were processed.
1321   inline void update_state(size_t word_count);
1322 
1323  protected:
1324   HeapWord*               _destination;         // Next addr to be written.
1325   ObjectStartArray* const _start_array;
1326   size_t                  _offset;
1327 };
1328 
1329 inline size_t MoveAndUpdateClosure::calculate_words_remaining(size_t region) {
1330   HeapWord* dest_addr = PSParallelCompact::summary_data().region_to_addr(region);
1331   PSParallelCompact::SpaceId dest_space_id = PSParallelCompact::space_id(dest_addr);
1332   HeapWord* new_top = PSParallelCompact::new_top(dest_space_id);
1333   assert(dest_addr < new_top, "sanity");
1334 
1335   return MIN2(pointer_delta(new_top, dest_addr), ParallelCompactData::RegionSize);
1336 }
1337 
1338 inline
1339 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1340                                            ParCompactionManager* cm,
1341                                            size_t region_idx) :
1342   ParMarkBitMapClosure(bitmap, cm, calculate_words_remaining(region_idx)),
1343   _destination(PSParallelCompact::summary_data().region_to_addr(region_idx)),
1344   _start_array(PSParallelCompact::start_array(PSParallelCompact::space_id(_destination))),
1345   _offset(0) { }
1346 
1347 
1348 inline void MoveAndUpdateClosure::update_state(size_t words)
1349 {
1350   decrement_words_remaining(words);
1351   _source += words;
1352   _destination += words;
1353 }
1354 
1355 class ShadowClosure: public MoveAndUpdateClosure {
1356   inline size_t calculate_shadow_offset(size_t region_idx, size_t shadow_idx);
1357 public:
1358   inline ShadowClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1359                        size_t region, size_t shadow);
1360 
1361   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
1362                                PSParallelCompact::RegionData* region_ptr);
1363 
1364 private:
1365   size_t _shadow;
1366 };
1367 
1368 inline size_t ShadowClosure::calculate_shadow_offset(size_t region_idx, size_t shadow_idx) {
1369   ParallelCompactData& sd = PSParallelCompact::summary_data();
1370   HeapWord* dest_addr = sd.region_to_addr(region_idx);
1371   HeapWord* shadow_addr = sd.region_to_addr(shadow_idx);
1372   return pointer_delta(shadow_addr, dest_addr);
1373 }
1374 
1375 inline
1376 ShadowClosure::ShadowClosure(ParMarkBitMap *bitmap,
1377                              ParCompactionManager *cm,
1378                              size_t region,
1379                              size_t shadow) :
1380   MoveAndUpdateClosure(bitmap, cm, region),
1381   _shadow(shadow) {
1382   _offset = calculate_shadow_offset(region, shadow);
1383 }
1384 
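
The offset trick underlying ShadowClosure can be stated compactly:
copy_destination() is _destination + _offset, so the closure physically
writes into the shadow region while all logical address arithmetic keeps
using the real destination region.  A minimal, self-contained illustration
(the positions are invented numbers, not real heap addresses):

    // Hedged sketch of the ShadowClosure offset invariant.
    #include <cstddef>

    int main() {
      const size_t dest_addr   = 4096;  // start of destination region (assumed)
      const size_t shadow_addr = 8192;  // start of its shadow region (assumed)
      const size_t offset      = shadow_addr - dest_addr;    // calculate_shadow_offset
      const size_t destination      = dest_addr + 17;        // logical position
      const size_t copy_destination = destination + offset;  // physical position
      // Every word lands at the same relative position inside the shadow
      // region that it will finally occupy in the destination region, which
      // is what lets copy_back() move the region contents wholesale.
      return (copy_destination - shadow_addr) == (destination - dest_addr) ? 0 : 1;
    }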
1385 class UpdateOnlyClosure: public ParMarkBitMapClosure {
1386  private:
1387   const PSParallelCompact::SpaceId _space_id;
1388   ObjectStartArray* const          _start_array;
1389 
1390  public:
1391   UpdateOnlyClosure(ParMarkBitMap* mbm,
1392                     ParCompactionManager* cm,
1393                     PSParallelCompact::SpaceId space_id);
1394 
1395   // Update the object.
1396   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1397 
1398   inline void do_addr(HeapWord* addr);
1399 };
1400 
1401 class FillClosure: public ParMarkBitMapClosure {
1402  public: