src/hotspot/share/gc/parallel/psParallelCompact.hpp

 222   static const size_t Log2BlockSize;
 223   static const size_t BlockSize;
 224   static const size_t BlockSizeBytes;
 225 
 226   static const size_t BlockSizeOffsetMask;
 227   static const size_t BlockAddrOffsetMask;
 228   static const size_t BlockAddrMask;
 229 
 230   static const size_t BlocksPerRegion;
 231   static const size_t Log2BlocksPerRegion;
 232 
 233   class RegionData
 234   {
 235   public:
 236     // Destination address of the region.
 237     HeapWord* destination() const { return _destination; }
 238 
 239     // The first region containing data destined for this region.
 240     size_t source_region() const { return _source_region; }
 241 
 242     // The object (if any) starting in this region and ending in a different
 243     // region that could not be updated during the main (parallel) compaction
 244     // phase.  This is different from _partial_obj_addr, which refers to an
 245     // object that extends onto a source region.  However, the two uses do not
 246     // overlap in time, so the same field is used to save space.
 247     HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
 248 
 249     // The starting address of the partial object extending onto the region.
 250     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
 251 
 252     // Size of the partial object extending onto the region (words).
 253     size_t partial_obj_size() const { return _partial_obj_size; }
 254 
 255     // Size of live data that lies within this region due to objects that start
 256     // in this region (words).  This does not include the partial object
 257     // extending onto the region (if any), or the part of an object that extends
 258     // onto the next region (if any).
 259     size_t live_obj_size() const { return _dc_and_los & los_mask; }
 260 
 261     // Total live data that lies within the region (words).


 290 
 291     // The location of the java heap data that corresponds to this region.
 292     inline HeapWord* data_location() const;
 293 
 294     // The highest address referenced by objects in this region.
 295     inline HeapWord* highest_ref() const;
 296 
 297     // Whether this region is available to be claimed, has been claimed, or has
 298     // been completed.
 299     //
 300     // Minor subtlety:  claimed() returns true if the region is marked
 301     // completed(), which is desirable since a region must be claimed before it
 302     // can be completed.
 303     bool available() const { return _dc_and_los < dc_one; }
 304     bool claimed()   const { return _dc_and_los >= dc_claimed; }
 305     bool completed() const { return _dc_and_los >= dc_completed; }
 306 
 307     // These are not atomic.
 308     void set_destination(HeapWord* addr)       { _destination = addr; }
 309     void set_source_region(size_t region)      { _source_region = region; }

 310     void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
 311     void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
 312     void set_partial_obj_size(size_t words)    {
 313       _partial_obj_size = (region_sz_t) words;
 314     }
 315     inline void set_blocks_filled();
 316 
 317     inline void set_destination_count(uint count);
 318     inline void set_live_obj_size(size_t words);
 319     inline void set_data_location(HeapWord* addr);
 320     inline void set_completed();
 321     inline bool claim_unsafe();
 322 
 323     // These are atomic.
 324     inline void add_live_obj(size_t words);
 325     inline void set_highest_ref(HeapWord* addr);
 326     inline void decrement_destination_count();
 327     inline bool claim();
 328 
 329   private:
 330     // The type used to represent object sizes within a region.
 331     typedef uint region_sz_t;
 332 
 333     // Constants for manipulating the _dc_and_los field, which holds both the
 334     // destination count and live obj size.  The live obj size lives at the
 335     // least significant end so no masking is necessary when adding.
 336     static const region_sz_t dc_shift;           // Shift amount.
 337     static const region_sz_t dc_mask;            // Mask for destination count.
 338     static const region_sz_t dc_one;             // 1, shifted appropriately.
 339     static const region_sz_t dc_claimed;         // Region has been claimed.
 340     static const region_sz_t dc_completed;       // Region has been completed.
 341     static const region_sz_t los_mask;           // Mask for live obj size.
 342 
 343     HeapWord*            _destination;
 344     size_t               _source_region;
 345     HeapWord*            _partial_obj_addr;
 346     region_sz_t          _partial_obj_size;
 347     region_sz_t volatile _dc_and_los;
 348     bool        volatile _blocks_filled;

 349 
 350 #ifdef ASSERT
 351     size_t               _blocks_filled_count;   // Number of block table fills.
 352 
 353     // These enable optimizations that are only partially implemented.  Use
 354     // debug builds to prevent the code fragments from breaking.
 355     HeapWord*            _data_location;
 356     HeapWord*            _highest_ref;
 357 #endif  // #ifdef ASSERT
 358 
 359 #ifdef ASSERT
 360    public:
 361     uint                 _pushed;   // 0 until region is pushed onto a stack
 362    private:
 363 #endif
 364   };
 365 
 366   // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
 367   // holds an offset, which is the amount of live data in the Region to the left
 368   // of the first live object that starts in the Block.
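
The _dc_and_los packing described above can be made concrete with a small stand-alone sketch. The following models the field with std::atomic rather than HotSpot's Atomic class; the shift and mask values mirror the definitions in psParallelCompact.cpp (dc_shift is 27 there), but treat the concrete numbers as illustrative.

#include <atomic>
#include <cassert>
#include <cstdint>

typedef uint32_t region_sz_t;

const region_sz_t dc_shift = 27;                          // Shift amount.
const region_sz_t dc_mask  = ~region_sz_t(0) << dc_shift; // Mask for destination count.
const region_sz_t dc_one   = region_sz_t(1) << dc_shift;  // 1, shifted appropriately.
const region_sz_t los_mask = ~dc_mask;                    // Mask for live obj size.

int main() {
  std::atomic<region_sz_t> dc_and_los(0);

  // add_live_obj(): the size lives at the least significant end, so the
  // addition needs no masking.
  dc_and_los.fetch_add(100);
  assert((dc_and_los.load() & los_mask) == 100);          // live_obj_size()

  // Setting a destination count of 2 leaves the live obj size untouched.
  dc_and_los.store((region_sz_t(2) << dc_shift) | (dc_and_los.load() & los_mask));
  assert(((dc_and_los.load() & dc_mask) >> dc_shift) == 2);

  // available() is simply _dc_and_los < dc_one; any destination count or
  // claim makes the region unavailable.
  assert(dc_and_los.load() >= dc_one);                    // no longer available()
  return 0;
}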


 579   Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
 580 }
 581 
 582 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
 583 {
 584 #ifdef ASSERT
 585   HeapWord* tmp = _highest_ref;
 586   while (addr > tmp) {
 587     tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
 588   }
 589 #endif  // #ifdef ASSERT
 590 }
 591 
 592 inline bool ParallelCompactData::RegionData::claim()
 593 {
 594   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
 595   const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
 596   return old == los;
 597 }
 598 
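The claim() implementation above folds a check and a claim into one compare-and-swap: when the destination count is zero and the region is unclaimed, _dc_and_los holds exactly the live obj size, so a CAS from los to dc_claimed | los succeeds for exactly one thread. A minimal model with std::atomic, reusing the illustrative constants from the sketch above:

#include <atomic>
#include <cassert>
#include <cstdint>

typedef uint32_t region_sz_t;
const region_sz_t dc_claimed = region_sz_t(0x8) << 27;   // illustrative value

// Atomic::cmpxchg(new_value, dest, compare_value) returns the old value, so
// the original returns old == los; compare_exchange_strong reports the same
// success/failure directly.
bool claim(std::atomic<region_sz_t>& dc_and_los, region_sz_t los) {
  region_sz_t expected = los;
  return dc_and_los.compare_exchange_strong(expected, dc_claimed | los);
}

int main() {
  std::atomic<region_sz_t> dc_and_los(100);   // live_obj_size() == 100, count == 0
  assert(claim(dc_and_los, 100));             // the first claimant wins
  assert(!claim(dc_and_los, 100));            // everyone else fails
  return 0;
}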
 599 inline ParallelCompactData::RegionData*
 600 ParallelCompactData::region(size_t region_idx) const
 601 {
 602   assert(region_idx <= region_count(), "bad arg");
 603   return _region_data + region_idx;
 604 }
 605 
 606 inline size_t
 607 ParallelCompactData::region(const RegionData* const region_ptr) const
 608 {
 609   assert(region_ptr >= _region_data, "bad arg");
 610   assert(region_ptr <= _region_data + region_count(), "bad arg");
 611   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 612 }
 613 
 614 inline ParallelCompactData::BlockData*
 615 ParallelCompactData::block(size_t n) const {
 616   assert(n < block_count(), "bad arg");
 617   return _block_data + n;
 618 }
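Note that both region() conversions above deliberately accept the one-past-the-end value: region(region_count()) and the corresponding pointer are legal, which is what makes end-style iteration work. A stand-alone round-trip check, with RegionData reduced to an opaque stand-in:

#include <cassert>
#include <cstddef>

struct RegionData { unsigned char opaque[48]; };   // stand-in for the real layout

static const size_t region_count = 8;
static RegionData region_data[region_count + 1];   // storage incl. end sentinel

RegionData* region(size_t region_idx) {
  assert(region_idx <= region_count);              // one past the end is legal
  return region_data + region_idx;
}

size_t region(const RegionData* region_ptr) {
  assert(region_ptr >= region_data && region_ptr <= region_data + region_count);
  // pointer_delta(region_ptr, region_data, sizeof(RegionData)) in HotSpot:
  // the byte difference divided by the element size.
  return size_t(region_ptr - region_data);
}

int main() {
  for (size_t i = 0; i <= region_count; ++i) {     // note <=, not <
    assert(region(region(i)) == i);
  }
  return 0;
}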


1162                                   size_t src_region_idx);
1163 
1164   // Determine the next source region, set closure.source() to the start
1165   // of the new region, and return the region index.  Parameter end_addr is
1166   // the address one beyond the end of the source range just processed.  If
1167   // necessary, switch to a new source space and set src_space_id (in-out
1168   // parameter) and src_space_top (out parameter) accordingly.
1169   static size_t next_src_region(MoveAndUpdateClosure& closure,
1170                                 SpaceId& src_space_id,
1171                                 HeapWord*& src_space_top,
1172                                 HeapWord* end_addr);
1173 
1174   // Decrement the destination count for each non-empty source region in the
1175   // range [beg_region, region(region_align_up(end_addr))).  If the destination
1176   // count for a region goes to 0 and it needs to be filled, enqueue it.
1177   static void decrement_destination_counts(ParCompactionManager* cm,
1178                                            SpaceId src_space_id,
1179                                            size_t beg_region,
1180                                            HeapWord* end_addr);
1181 
1182   // Fill a region, copying objects from one or more source regions.
1183   static void fill_region(ParCompactionManager* cm, size_t region_idx);
1184   static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1185     fill_region(cm, region);
1186   }
1187 
1188   // Fill in the block table for the specified region.
1189   static void fill_blocks(size_t region_idx);
1190 
1191   // Update the deferred objects in the space.
1192   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1193 
1194   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1195   static ParallelCompactData& summary_data() { return _summary_data; }
1196 
1197   // Reference Processing
1198   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1199 
1200   static STWGCTimer* gc_timer() { return &_gc_timer; }
1201 
1202   // Return the SpaceId for the given address.
1203   static SpaceId space_id(HeapWord* addr);
1204 
1205   // Time since last full gc (in milliseconds).
1206   static jlong millis_since_last_gc();


1213   static void print_region_ranges();
1214   static void print_dense_prefix_stats(const char* const algorithm,
1215                                        const SpaceId id,
1216                                        const bool maximum_compaction,
1217                                        HeapWord* const addr);
1218   static void summary_phase_msg(SpaceId dst_space_id,
1219                                 HeapWord* dst_beg, HeapWord* dst_end,
1220                                 SpaceId src_space_id,
1221                                 HeapWord* src_beg, HeapWord* src_end);
1222 #endif  // #ifndef PRODUCT
1223 
1224 #ifdef  ASSERT
1225   // Sanity check the new location of a word in the heap.
1226   static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1227   // Verify that all the regions have been emptied.
1228   static void verify_complete(SpaceId space_id);
1229 #endif  // #ifdef ASSERT
1230 };
1231 
1232 class MoveAndUpdateClosure: public ParMarkBitMapClosure {

1233  public:
1234   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1235                               ObjectStartArray* start_array,
1236                               HeapWord* destination, size_t words);
1237 
1238   // Accessors.
1239   HeapWord* destination() const         { return _destination; }

1240 
1241   // If the object will fit (size <= words_remaining()), copy it to the current
1242   // destination, update the interior oops and the start array and return either
1243   // full (if the closure is full) or incomplete.  If the object will not fit,
1244   // return would_overflow.
1245   virtual IterationStatus do_addr(HeapWord* addr, size_t size);
1246 
1247   // Copy enough words to fill this closure, starting at source().  Interior
1248   // oops and the start array are not updated.  Return full.
1249   IterationStatus copy_until_full();
1250 
1251   // Copy enough words to fill this closure or to the end of an object,
1252   // whichever is smaller, starting at source().  Interior oops and the start
1253   // array are not updated.
1254   void copy_partial_obj();
1255 
1256  protected:
1257   // Update variables to indicate that word_count words were processed.
1258   inline void update_state(size_t word_count);
1259 
1260  protected:
1261   ObjectStartArray* const _start_array;
1262   HeapWord*               _destination;         // Next addr to be written.


1263 };
1264 
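The do_addr() contract documented in the class above (full / incomplete / would_overflow) reduces to a comparison between the object size and words_remaining(). A tiny model of just that decision; the enumerator names are assumptions standing in for ParMarkBitMap's IterationStatus:

#include <cassert>
#include <cstddef>

enum IterationStatus { incomplete, full, would_overflow };  // names assumed

IterationStatus do_addr_outcome(size_t obj_size, size_t words_remaining) {
  if (obj_size > words_remaining) {
    return would_overflow;      // does not fit; the caller must defer the object
  }
  // Fits: the object is copied and its oops updated; the closure may now be
  // exactly full.
  return (obj_size == words_remaining) ? full : incomplete;
}

int main() {
  assert(do_addr_outcome( 10, 100) == incomplete);
  assert(do_addr_outcome(100, 100) == full);
  assert(do_addr_outcome(101, 100) == would_overflow);
  return 0;
}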
1265 inline
1266 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1267                                            ParCompactionManager* cm,
1268                                            ObjectStartArray* start_array,
1269                                            HeapWord* destination,
1270                                            size_t words) :
1271   ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
1272 {
1273   _destination = destination;
1274 }
1275 
1276 inline void MoveAndUpdateClosure::update_state(size_t words)
1277 {
1278   decrement_words_remaining(words);
1279   _source += words;
1280   _destination += words;
1281 }
1282 
1283 class UpdateOnlyClosure: public ParMarkBitMapClosure {
1284  private:
1285   const PSParallelCompact::SpaceId _space_id;
1286   ObjectStartArray* const          _start_array;
1287 
1288  public:
1289   UpdateOnlyClosure(ParMarkBitMap* mbm,
1290                     ParCompactionManager* cm,
1291                     PSParallelCompact::SpaceId space_id);
1292 
1293   // Update the object.
1294   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1295 
1296   inline void do_addr(HeapWord* addr);
1297 };
1298 
1299 class FillClosure: public ParMarkBitMapClosure {
1300  public:


 222   static const size_t Log2BlockSize;
 223   static const size_t BlockSize;
 224   static const size_t BlockSizeBytes;
 225 
 226   static const size_t BlockSizeOffsetMask;
 227   static const size_t BlockAddrOffsetMask;
 228   static const size_t BlockAddrMask;
 229 
 230   static const size_t BlocksPerRegion;
 231   static const size_t Log2BlocksPerRegion;
 232 
 233   class RegionData
 234   {
 235   public:
 236     // Destination address of the region.
 237     HeapWord* destination() const { return _destination; }
 238 
 239     // The first region containing data destined for this region.
 240     size_t source_region() const { return _source_region; }
 241 
 242     // Reuse _source_region to store the corresponding shadow region index
 243     size_t shadow_region() const { return _source_region; }
 244 
 245     // The object (if any) starting in this region and ending in a different
 246     // region that could not be updated during the main (parallel) compaction
 247     // phase.  This is different from _partial_obj_addr, which refers to an
 248     // object that extends onto a source region.  However, the two uses do not
 249     // overlap in time, so the same field is used to save space.
 250     HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
 251 
 252     // The starting address of the partial object extending onto the region.
 253     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
 254 
 255     // Size of the partial object extending onto the region (words).
 256     size_t partial_obj_size() const { return _partial_obj_size; }
 257 
 258     // Size of live data that lies within this region due to objects that start
 259     // in this region (words).  This does not include the partial object
 260     // extending onto the region (if any), or the part of an object that extends
 261     // onto the next region (if any).
 262     size_t live_obj_size() const { return _dc_and_los & los_mask; }
 263 
 264     // Total live data that lies within the region (words).


 293 
 294     // The location of the java heap data that corresponds to this region.
 295     inline HeapWord* data_location() const;
 296 
 297     // The highest address referenced by objects in this region.
 298     inline HeapWord* highest_ref() const;
 299 
 300     // Whether this region is available to be claimed, has been claimed, or has
 301     // been completed.
 302     //
 303     // Minor subtlety:  claimed() returns true if the region is marked
 304     // completed(), which is desirable since a region must be claimed before it
 305     // can be completed.
 306     bool available() const { return _dc_and_los < dc_one; }
 307     bool claimed()   const { return _dc_and_los >= dc_claimed; }
 308     bool completed() const { return _dc_and_los >= dc_completed; }
 309 
 310     // These are not atomic.
 311     void set_destination(HeapWord* addr)       { _destination = addr; }
 312     void set_source_region(size_t region)      { _source_region = region; }
 313     void set_shadow_region(size_t region)      { _source_region = region; }
 314     void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
 315     void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
 316     void set_partial_obj_size(size_t words)    {
 317       _partial_obj_size = (region_sz_t) words;
 318     }
 319     inline void set_blocks_filled();
 320 
 321     inline void set_destination_count(uint count);
 322     inline void set_live_obj_size(size_t words);
 323     inline void set_data_location(HeapWord* addr);
 324     inline void set_completed();
 325     inline bool claim_unsafe();
 326 
 327     // These are atomic.
 328     inline void add_live_obj(size_t words);
 329     inline void set_highest_ref(HeapWord* addr);
 330     inline void decrement_destination_count();
 331     inline bool claim();
 332 
 333     // Possible values of _shadow_state; the transitions are as follows:
 334     // Normal path:
 335     // UNUSED -> try_push() -> FINISH
 336     // Steal path:
 337     // UNUSED -> try_steal() -> SHADOW -> mark_filled() -> FILLED -> try_copy() -> FINISH
 338     static const int UNUSED;                     // Original state
 339     static const int SHADOW;                     // Stolen by an idle thread; a shadow region is created for it
 340     static const int FILLED;                     // Its shadow region has been filled and is ready to be copied back
 341     static const int FINISH;                     // Work has been done
 342 
 343     // Preempt the region to avoid processing it twice
 344     inline bool try_push();
 345     inline bool try_steal();
 346     // Mark the region as filled and ready to be copied back
 347     inline void mark_filled();
 348     // Preempt the region to copy the shadow region content back
 349     inline bool try_copy();
 350     // Special case: see the comment in PSParallelCompact::fill_shadow_region.
 351     // Return to the normal path here
 352     inline void mark_normal();
 353 
 354 
 355     int shadow_state() { return _shadow_state; }
 356 
 357   private:
 358     // The type used to represent object sizes within a region.
 359     typedef uint region_sz_t;
 360 
 361     // Constants for manipulating the _dc_and_los field, which holds both the
 362     // destination count and live obj size.  The live obj size lives at the
 363     // least significant end so no masking is necessary when adding.
 364     static const region_sz_t dc_shift;           // Shift amount.
 365     static const region_sz_t dc_mask;            // Mask for destination count.
 366     static const region_sz_t dc_one;             // 1, shifted appropriately.
 367     static const region_sz_t dc_claimed;         // Region has been claimed.
 368     static const region_sz_t dc_completed;       // Region has been completed.
 369     static const region_sz_t los_mask;           // Mask for live obj size.
 370 
 371     HeapWord*            _destination;
 372     size_t               _source_region;
 373     HeapWord*            _partial_obj_addr;
 374     region_sz_t          _partial_obj_size;
 375     region_sz_t volatile _dc_and_los;
 376     bool        volatile _blocks_filled;
 377     int         volatile _shadow_state;
 378 
 379 #ifdef ASSERT
 380     size_t               _blocks_filled_count;   // Number of block table fills.
 381 
 382     // These enable optimizations that are only partially implemented.  Use
 383     // debug builds to prevent the code fragments from breaking.
 384     HeapWord*            _data_location;
 385     HeapWord*            _highest_ref;
 386 #endif  // #ifdef ASSERT
 387 
 388 #ifdef ASSERT
 389    public:
 390     uint                 _pushed;   // 0 until region is pushed onto a stack
 391    private:
 392 #endif
 393   };
 394 
 395   // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
 396   // holds an offset, which is the amount of live data in the Region to the left
 397   // of the first live object that starts in the Block.
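
The _shadow_state transitions documented in the class above are all single compare-and-swap steps, which is what makes the push/steal race safe. A runnable model using std::atomic<int>; the concrete enumerator values are assumptions, only the transition order matters:

#include <atomic>
#include <cassert>

enum { UNUSED = 0, SHADOW = 1, FILLED = 2, FINISH = 3 };   // values assumed

struct Region {
  std::atomic<int> shadow_state{UNUSED};

  bool try_push()  { int e = UNUSED; return shadow_state.compare_exchange_strong(e, FINISH); }
  bool try_steal() { int e = UNUSED; return shadow_state.compare_exchange_strong(e, SHADOW); }
  void mark_filled() {
    int e = SHADOW;
    bool ok = shadow_state.compare_exchange_strong(e, FILLED);
    assert(ok && "Failed to mark the region as filled");
    (void)ok;                    // silence unused warning under NDEBUG
  }
  bool try_copy()  { int e = FILLED; return shadow_state.compare_exchange_strong(e, FINISH); }
};

int main() {
  Region r;
  assert(r.try_steal());     // steal path: UNUSED -> SHADOW
  assert(!r.try_push());     // the normal path now loses the race
  r.mark_filled();           // SHADOW -> FILLED
  assert(r.try_copy());      // FILLED -> FINISH
  assert(!r.try_copy());     // the copy back happens exactly once
  return 0;
}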


 608   Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
 609 }
 610 
 611 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
 612 {
 613 #ifdef ASSERT
 614   HeapWord* tmp = _highest_ref;
 615   while (addr > tmp) {
 616     tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
 617   }
 618 #endif  // #ifdef ASSERT
 619 }
 620 
 621 inline bool ParallelCompactData::RegionData::claim()
 622 {
 623   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
 624   const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
 625   return old == los;
 626 }
 627 
 628 inline bool ParallelCompactData::RegionData::try_push() {
 629   return Atomic::cmpxchg(FINISH, &_shadow_state, UNUSED) == UNUSED;
 630 }
 631 
 632 inline bool ParallelCompactData::RegionData::try_steal() {
 633   return Atomic::cmpxchg(SHADOW, &_shadow_state, UNUSED) == UNUSED;
 634 }
 635 
 636 inline void ParallelCompactData::RegionData::mark_filled() {
 637   int old = Atomic::cmpxchg(FILLED, &_shadow_state, SHADOW);
 638   assert(old == SHADOW, "Failed to mark the region as filled");
 639 }
 640 
 641 inline bool ParallelCompactData::RegionData::try_copy() {
 642   return Atomic::cmpxchg(FINISH, &_shadow_state, FILLED) == FILLED;
 643 }
 644 
 645 inline void ParallelCompactData::RegionData::mark_normal() {
 646   int old = Atomic::cmpxchg(FINISH, &_shadow_state, SHADOW);
 647   assert(old == SHADOW, "Failed to mark the region as finished");
 648 }
 649 
 650 inline ParallelCompactData::RegionData*
 651 ParallelCompactData::region(size_t region_idx) const
 652 {
 653   assert(region_idx <= region_count(), "bad arg");
 654   return _region_data + region_idx;
 655 }
 656 
 657 inline size_t
 658 ParallelCompactData::region(const RegionData* const region_ptr) const
 659 {
 660   assert(region_ptr >= _region_data, "bad arg");
 661   assert(region_ptr <= _region_data + region_count(), "bad arg");
 662   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 663 }
 664 
 665 inline ParallelCompactData::BlockData*
 666 ParallelCompactData::block(size_t n) const {
 667   assert(n < block_count(), "bad arg");
 668   return _block_data + n;
 669 }


1213                                   size_t src_region_idx);
1214 
1215   // Determine the next source region, set closure.source() to the start
1216   // of the new region, and return the region index.  Parameter end_addr is
1217   // the address one beyond the end of the source range just processed.  If
1218   // necessary, switch to a new source space and set src_space_id (in-out
1219   // parameter) and src_space_top (out parameter) accordingly.
1220   static size_t next_src_region(MoveAndUpdateClosure& closure,
1221                                 SpaceId& src_space_id,
1222                                 HeapWord*& src_space_top,
1223                                 HeapWord* end_addr);
1224 
1225   // Decrement the destination count for each non-empty source region in the
1226   // range [beg_region, region(region_align_up(end_addr))).  If the destination
1227   // count for a region goes to 0 and it needs to be filled, enqueue it.
1228   static void decrement_destination_counts(ParCompactionManager* cm,
1229                                            SpaceId src_space_id,
1230                                            size_t beg_region,
1231                                            HeapWord* end_addr);
1232 
1233   static void fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region);
1234   static void fill_and_update_region(ParCompactionManager* cm, size_t region);
1235 
1236   static bool steal_shadow_region(ParCompactionManager* cm, size_t& region_idx);
1237   static void fill_shadow_region(ParCompactionManager* cm, size_t region_idx);
1238   static void fill_and_update_shadow_region(ParCompactionManager* cm, size_t region) {
1239     fill_shadow_region(cm, region);
1240   }
1241   // Copy the content of a shadow region back to its corresponding heap region
1242   static void copy_back(HeapWord* shadow_addr, HeapWord* region_addr);
1243   // Initialize the steal record of a GC thread
1244   static void initialize_steal_record(uint which);
1245   // Reuse the empty heap regions as shadow regions, like to-space regions
1246   static void enqueue_shadow_region();
1247 
1248   // Fill in the block table for the specified region.
1249   static void fill_blocks(size_t region_idx);
1250 
1251   // Update the deferred objects in the space.
1252   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1253 
1254   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1255   static ParallelCompactData& summary_data() { return _summary_data; }
1256 
1257   // Reference Processing
1258   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1259 
1260   static STWGCTimer* gc_timer() { return &_gc_timer; }
1261 
1262   // Return the SpaceId for the given address.
1263   static SpaceId space_id(HeapWord* addr);
1264 
1265   // Time since last full gc (in milliseconds).
1266   static jlong millis_since_last_gc();
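
copy_back() above reconciles a filled shadow region with its real destination once that destination region becomes free, and enqueue_shadow_region() stocks the shadow pool from empty heap regions. A minimal stand-alone model of the copy itself; the real code copies a full region of HeapWords, and RegionSize here is illustrative:

#include <cassert>
#include <cstddef>
#include <cstring>

typedef unsigned long HeapWordValue;                 // stand-in for HeapWord
const size_t RegionSize = 1024;                      // words per region (illustrative)

// Copy the content of a shadow region back to its corresponding heap region.
void copy_back(const HeapWordValue* shadow_addr, HeapWordValue* region_addr) {
  std::memcpy(region_addr, shadow_addr, RegionSize * sizeof(HeapWordValue));
}

int main() {
  static HeapWordValue shadow[RegionSize];
  static HeapWordValue region[RegionSize];
  for (size_t i = 0; i < RegionSize; ++i) {
    shadow[i] = i;                                   // "compacted" content
  }
  copy_back(shadow, region);
  assert(region[0] == 0 && region[RegionSize - 1] == RegionSize - 1);
  return 0;
}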


1273   static void print_region_ranges();
1274   static void print_dense_prefix_stats(const char* const algorithm,
1275                                        const SpaceId id,
1276                                        const bool maximum_compaction,
1277                                        HeapWord* const addr);
1278   static void summary_phase_msg(SpaceId dst_space_id,
1279                                 HeapWord* dst_beg, HeapWord* dst_end,
1280                                 SpaceId src_space_id,
1281                                 HeapWord* src_beg, HeapWord* src_end);
1282 #endif  // #ifndef PRODUCT
1283 
1284 #ifdef  ASSERT
1285   // Sanity check the new location of a word in the heap.
1286   static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1287   // Verify that all the regions have been emptied.
1288   static void verify_complete(SpaceId space_id);
1289 #endif  // #ifdef ASSERT
1290 };
1291 
1292 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
1293   static inline size_t calculate_words_remaining(size_t region);
1294  public:
1295   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1296                               size_t region);

1297 
1298   // Accessors.
1299   HeapWord* destination() const         { return _destination; }
1300   HeapWord* copy_destination() const    { return _destination + _offset; }
1301 
1302   // If the object will fit (size <= words_remaining()), copy it to the current
1303   // destination, update the interior oops and the start array and return either
1304   // full (if the closure is full) or incomplete.  If the object will not fit,
1305   // return would_overflow.
1306   IterationStatus do_addr(HeapWord* addr, size_t size);
1307 
1308   // Copy enough words to fill this closure, starting at source().  Interior
1309   // oops and the start array are not updated.  Return full.
1310   IterationStatus copy_until_full();
1311 
1312   // Copy enough words to fill this closure or to the end of an object,
1313   // whichever is smaller, starting at source().  Interior oops and the start
1314   // array are not updated.
1315   void copy_partial_obj();
1316 
1317   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
1318                                PSParallelCompact::RegionData* region_ptr);
1319 
1320  protected:
1321   // Update variables to indicate that word_count words were processed.
1322   inline void update_state(size_t word_count);
1323 
1324  protected:

1325   HeapWord*               _destination;         // Next addr to be written.
1326   ObjectStartArray* const _start_array;
1327   size_t                  _offset;
1328 };
1329 
1330 inline size_t MoveAndUpdateClosure::calculate_words_remaining(size_t region) {
1331   HeapWord* dest_addr = PSParallelCompact::summary_data().region_to_addr(region);
1332   PSParallelCompact::SpaceId dest_space_id = PSParallelCompact::space_id(dest_addr);
1333   HeapWord* new_top = PSParallelCompact::new_top(dest_space_id);
1334   assert(dest_addr < new_top, "sanity");
1335 
1336   return MIN2(pointer_delta(new_top, dest_addr), ParallelCompactData::RegionSize);
1337 }
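
A numeric spot check of calculate_words_remaining(): every destination region gets a full RegionSize of words except the last, partial region below new_top. Addresses are modeled as word offsets and the values are illustrative:

#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  const size_t RegionSize = 512;           // words per region (illustrative)
  const size_t new_top    = 1200;          // word offset of the space's new_top

  // Interior region at offset 512: a full RegionSize remains.
  size_t dest_addr = 512;
  assert(std::min(new_top - dest_addr, RegionSize) == RegionSize);

  // Last region at offset 1024: only the 176 words below new_top remain.
  dest_addr = 1024;
  assert(std::min(new_top - dest_addr, RegionSize) == 176);
  return 0;
}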
1338 
1339 inline
1340 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1341                                            ParCompactionManager* cm,
1342                                            size_t region_idx) :
1343   ParMarkBitMapClosure(bitmap, cm, calculate_words_remaining(region_idx)),
1344   _destination(PSParallelCompact::summary_data().region_to_addr(region_idx)),
1345   _start_array(PSParallelCompact::start_array(PSParallelCompact::space_id(_destination))),
1346   _offset(0) { }
1347 

1348 
1349 inline void MoveAndUpdateClosure::update_state(size_t words)
1350 {
1351   decrement_words_remaining(words);
1352   _source += words;
1353   _destination += words;
1354 }
1355 
1356 class ShadowClosure: public MoveAndUpdateClosure {
1357   inline size_t calculate_shadow_offset(size_t region_idx, size_t shadow_idx);
1358  public:
1359   inline ShadowClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1360                        size_t region, size_t shadow);
1361 
1362   virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
1363                                PSParallelCompact::RegionData* region_ptr);
1364 
1365  private:
1366   size_t _shadow;
1367 };
1368 
1369 inline size_t ShadowClosure::calculate_shadow_offset(size_t region_idx, size_t shadow_idx) {
1370   ParallelCompactData& sd = PSParallelCompact::summary_data();
1371   HeapWord* dest_addr = sd.region_to_addr(region_idx);
1372   HeapWord* shadow_addr = sd.region_to_addr(shadow_idx);
1373   return pointer_delta(shadow_addr, dest_addr);
1374 }
1375 
1376 inline
1377 ShadowClosure::ShadowClosure(ParMarkBitMap *bitmap,
1378                              ParCompactionManager *cm,
1379                              size_t region,
1380                              size_t shadow) :
1381   MoveAndUpdateClosure(bitmap, cm, region),
1382   _shadow(shadow) {
1383   _offset = calculate_shadow_offset(region, shadow);
1384 }
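
The effect of the _offset set here is that copy_destination() (that is, _destination + _offset) points into the shadow region, while _destination keeps tracking the logical, final address, so bookkeeping such as oop updates and update_state() stays in destination terms. A sketch with addresses modeled as word offsets (values illustrative):

#include <cassert>
#include <cstddef>

int main() {
  const size_t region_addr = 4096;         // logical destination region start
  const size_t shadow_addr = 8192;         // physical shadow region start
  const size_t offset      = shadow_addr - region_addr;  // calculate_shadow_offset()

  size_t destination = region_addr;        // advances as update_state() runs
  destination += 100;                      // after copying 100 words...
  const size_t copy_destination = destination + offset;
  assert(copy_destination == shadow_addr + 100);  // ...writes land in the shadow region
  return 0;
}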
1385 
1386 class UpdateOnlyClosure: public ParMarkBitMapClosure {
1387  private:
1388   const PSParallelCompact::SpaceId _space_id;
1389   ObjectStartArray* const          _start_array;
1390 
1391  public:
1392   UpdateOnlyClosure(ParMarkBitMap* mbm,
1393                     ParCompactionManager* cm,
1394                     PSParallelCompact::SpaceId space_id);
1395 
1396   // Update the object.
1397   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1398 
1399   inline void do_addr(HeapWord* addr);
1400 };
1401 
1402 class FillClosure: public ParMarkBitMapClosure {
1403  public: