224 static const size_t Log2BlockSize;
225 static const size_t BlockSize;
226 static const size_t BlockSizeBytes;
227
228 static const size_t BlockSizeOffsetMask;
229 static const size_t BlockAddrOffsetMask;
230 static const size_t BlockAddrMask;
231
232 static const size_t BlocksPerRegion;
233 static const size_t Log2BlocksPerRegion;
234
235 class RegionData
236 {
237 public:
238 // Destination address of the region.
239 HeapWord* destination() const { return _destination; }
240
241 // The first region containing data destined for this region.
242 size_t source_region() const { return _source_region; }
243
244 // The object (if any) starting in this region and ending in a different
245 // region that could not be updated during the main (parallel) compaction
246 // phase. This is different from _partial_obj_addr, which is an object that
247 // extends onto a source region. However, the two uses do not overlap in
248 // time, so the same field is used to save space.
249 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
250
251 // The starting address of the partial object extending onto the region.
252 HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
253
254 // Size of the partial object extending onto the region (words).
255 size_t partial_obj_size() const { return _partial_obj_size; }
256
257 // Size of live data that lies within this region due to objects that start
258 // in this region (words). This does not include the partial object
259 // extending onto the region (if any), or the part of an object that extends
260 // onto the next region (if any).
261 size_t live_obj_size() const { return _dc_and_los & los_mask; }
262
263 // Total live data that lies within the region (words).
292
293 // The location of the java heap data that corresponds to this region.
294 inline HeapWord* data_location() const;
295
296 // The highest address referenced by objects in this region.
297 inline HeapWord* highest_ref() const;
298
299 // Whether this region is available to be claimed, has been claimed, or has
300 // been completed.
301 //
302 // Minor subtlety: claimed() returns true if the region is marked
303 // completed(), which is desirable since a region must be claimed before it
304 // can be completed.
305 bool available() const { return _dc_and_los < dc_one; }
306 bool claimed() const { return _dc_and_los >= dc_claimed; }
307 bool completed() const { return _dc_and_los >= dc_completed; }
308
309 // These are not atomic.
310 void set_destination(HeapWord* addr) { _destination = addr; }
311 void set_source_region(size_t region) { _source_region = region; }
312 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
313 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
314 void set_partial_obj_size(size_t words) {
315 _partial_obj_size = (region_sz_t) words;
316 }
317 inline void set_blocks_filled();
318
319 inline void set_destination_count(uint count);
320 inline void set_live_obj_size(size_t words);
321 inline void set_data_location(HeapWord* addr);
322 inline void set_completed();
323 inline bool claim_unsafe();
324
325 // These are atomic.
326 inline void add_live_obj(size_t words);
327 inline void set_highest_ref(HeapWord* addr);
328 inline void decrement_destination_count();
329 inline bool claim();
330
331 private:
332 // The type used to represent object sizes within a region.
333 typedef uint region_sz_t;
334
335 // Constants for manipulating the _dc_and_los field, which holds both the
336 // destination count and live obj size. The live obj size lives at the
337 // least significant end so no masking is necessary when adding.
338 static const region_sz_t dc_shift; // Shift amount.
339 static const region_sz_t dc_mask; // Mask for destination count.
340 static const region_sz_t dc_one; // 1, shifted appropriately.
341 static const region_sz_t dc_claimed; // Region has been claimed.
342 static const region_sz_t dc_completed; // Region has been completed.
343 static const region_sz_t los_mask; // Mask for live obj size.
344
345 HeapWord* _destination;
346 size_t _source_region;
347 HeapWord* _partial_obj_addr;
348 region_sz_t _partial_obj_size;
349 region_sz_t volatile _dc_and_los;
350 bool volatile _blocks_filled;
351
352 #ifdef ASSERT
353 size_t _blocks_filled_count; // Number of block table fills.
354
355 // These enable optimizations that are only partially implemented. Use
356 // debug builds to prevent the code fragments from breaking.
357 HeapWord* _data_location;
358 HeapWord* _highest_ref;
359 #endif // #ifdef ASSERT
360
361 #ifdef ASSERT
362 public:
363 uint _pushed; // 0 until region is pushed onto a stack
364 private:
365 #endif
366 };
367
368 // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
369 // holds an offset, which is the amount of live data in the Region to the left
370 // of the first live object that starts in the Block.
581 Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
582 }
583
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  // Debug-only bookkeeping: atomically raise _highest_ref to addr.
  // Classic CAS-max loop: each failed cmpxchg returns the value another
  // thread installed; retry until the stored value is >= addr or our
  // cmpxchg succeeds.  Product builds compile this to a no-op.
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
  }
#endif // #ifdef ASSERT
}
593
inline bool ParallelCompactData::RegionData::claim()
{
  // Atomically claim the region by setting the destination-count bits to
  // dc_claimed, but only if _dc_and_los currently holds just the live obj
  // size (i.e. the destination count is zero and nobody has claimed it).
  // Returns true iff this thread performed the transition.
  const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
  const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
  return old == los;
}
600
// Return the RegionData for the given region index.  An index equal to
// region_count() is permitted so an end/one-past-the-last pointer can be
// formed.
inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}
607
// Map a RegionData pointer back to its index within the _region_data array
// (inverse of region(size_t)).
inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}
615
// Return the BlockData for the given block index.  Unlike region(size_t),
// an end index is not permitted here (strict <).
inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}
1164 size_t src_region_idx);
1165
1166 // Determine the next source region, set closure.source() to the start of the
1167 // new region, and return the region index. Parameter end_addr is the address one
1168 // beyond the end of source range just processed. If necessary, switch to a
1169 // new source space and set src_space_id (in-out parameter) and src_space_top
1170 // (out parameter) accordingly.
1171 static size_t next_src_region(MoveAndUpdateClosure& closure,
1172 SpaceId& src_space_id,
1173 HeapWord*& src_space_top,
1174 HeapWord* end_addr);
1175
1176 // Decrement the destination count for each non-empty source region in the
1177 // range [beg_region, region(region_align_up(end_addr))). If the destination
1178 // count for a region goes to 0 and it needs to be filled, enqueue it.
1179 static void decrement_destination_counts(ParCompactionManager* cm,
1180 SpaceId src_space_id,
1181 size_t beg_region,
1182 HeapWord* end_addr);
1183
1184 // Fill a region, copying objects from one or more source regions.
1185 static void fill_region(ParCompactionManager* cm, size_t region_idx);
1186 static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1187 fill_region(cm, region);
1188 }
1189
1190 // Fill in the block table for the specified region.
1191 static void fill_blocks(size_t region_idx);
1192
1193 // Update the deferred objects in the space.
1194 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1195
1196 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1197 static ParallelCompactData& summary_data() { return _summary_data; }
1198
1199 // Reference Processing
1200 static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1201
1202 static STWGCTimer* gc_timer() { return &_gc_timer; }
1203
1204 // Return the SpaceId for the given address.
1205 static SpaceId space_id(HeapWord* addr);
1206
1207 // Time since last full gc (in milliseconds).
1208 static jlong millis_since_last_gc();
1215 static void print_region_ranges();
1216 static void print_dense_prefix_stats(const char* const algorithm,
1217 const SpaceId id,
1218 const bool maximum_compaction,
1219 HeapWord* const addr);
1220 static void summary_phase_msg(SpaceId dst_space_id,
1221 HeapWord* dst_beg, HeapWord* dst_end,
1222 SpaceId src_space_id,
1223 HeapWord* src_beg, HeapWord* src_end);
1224 #endif // #ifndef PRODUCT
1225
1226 #ifdef ASSERT
1227 // Sanity check the new location of a word in the heap.
1228 static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1229 // Verify that all the regions have been emptied.
1230 static void verify_complete(SpaceId space_id);
1231 #endif // #ifdef ASSERT
1232 };
1233
// Bitmap closure that copies live objects to their destination and updates
// their interior oops and the object start array along the way.  The copy
// budget ("words") is fixed at construction; do_addr() reports when it is
// exhausted.
class MoveAndUpdateClosure: public ParMarkBitMapClosure {
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return either
  // full (if the closure is full) or incomplete. If the object will not fit,
  // return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

 protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

 protected:
  ObjectStartArray* const _start_array;
  HeapWord* _destination;          // Next addr to be written.
};
1266
1267 inline
1268 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1269 ParCompactionManager* cm,
1270 ObjectStartArray* start_array,
1271 HeapWord* destination,
1272 size_t words) :
1273 ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
1274 {
1275 _destination = destination;
1276 }
1277
inline void MoveAndUpdateClosure::update_state(size_t words)
{
  // Record that "words" words were consumed: shrink the remaining budget
  // (tracked by the base class) and advance both cursors in lock step.
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}
1284
// Bitmap closure that updates interior oops of live objects in place,
// without moving the objects themselves.
class UpdateOnlyClosure: public ParMarkBitMapClosure {
 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const _start_array;

 public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  // Convenience overload; presumably forwards to the two-argument form —
  // definition not visible here.
  inline void do_addr(HeapWord* addr);
};
1300
1301 class FillClosure: public ParMarkBitMapClosure {
1302 public:
|
224 static const size_t Log2BlockSize;
225 static const size_t BlockSize;
226 static const size_t BlockSizeBytes;
227
228 static const size_t BlockSizeOffsetMask;
229 static const size_t BlockAddrOffsetMask;
230 static const size_t BlockAddrMask;
231
232 static const size_t BlocksPerRegion;
233 static const size_t Log2BlocksPerRegion;
234
235 class RegionData
236 {
237 public:
238 // Destination address of the region.
239 HeapWord* destination() const { return _destination; }
240
241 // The first region containing data destined for this region.
242 size_t source_region() const { return _source_region; }
243
244 // Reuse _source_region to store the corresponding shadow region index
245 size_t shadow_region() const { return _source_region; }
246
247 // The object (if any) starting in this region and ending in a different
248 // region that could not be updated during the main (parallel) compaction
249 // phase. This is different from _partial_obj_addr, which is an object that
250 // extends onto a source region. However, the two uses do not overlap in
251 // time, so the same field is used to save space.
252 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
253
254 // The starting address of the partial object extending onto the region.
255 HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
256
257 // Size of the partial object extending onto the region (words).
258 size_t partial_obj_size() const { return _partial_obj_size; }
259
260 // Size of live data that lies within this region due to objects that start
261 // in this region (words). This does not include the partial object
262 // extending onto the region (if any), or the part of an object that extends
263 // onto the next region (if any).
264 size_t live_obj_size() const { return _dc_and_los & los_mask; }
265
266 // Total live data that lies within the region (words).
295
296 // The location of the java heap data that corresponds to this region.
297 inline HeapWord* data_location() const;
298
299 // The highest address referenced by objects in this region.
300 inline HeapWord* highest_ref() const;
301
302 // Whether this region is available to be claimed, has been claimed, or has
303 // been completed.
304 //
305 // Minor subtlety: claimed() returns true if the region is marked
306 // completed(), which is desirable since a region must be claimed before it
307 // can be completed.
308 bool available() const { return _dc_and_los < dc_one; }
309 bool claimed() const { return _dc_and_los >= dc_claimed; }
310 bool completed() const { return _dc_and_los >= dc_completed; }
311
312 // These are not atomic.
313 void set_destination(HeapWord* addr) { _destination = addr; }
314 void set_source_region(size_t region) { _source_region = region; }
315 void set_shadow_region(size_t region) { _source_region = region; }
316 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
317 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
318 void set_partial_obj_size(size_t words) {
319 _partial_obj_size = (region_sz_t) words;
320 }
321 inline void set_blocks_filled();
322
323 inline void set_destination_count(uint count);
324 inline void set_live_obj_size(size_t words);
325 inline void set_data_location(HeapWord* addr);
326 inline void set_completed();
327 inline bool claim_unsafe();
328
329 // These are atomic.
330 inline void add_live_obj(size_t words);
331 inline void set_highest_ref(HeapWord* addr);
332 inline void decrement_destination_count();
333 inline bool claim();
334
335 // Possible values of _shadow_state, and transition is as follows
336 // Normal Path:
337 // UnusedRegion -> mark_normal() -> NormalRegion
338 // Shadow Path:
339 // UnusedRegion -> mark_shadow() -> ShadowRegion ->
340 // mark_filled() -> FilledShadow -> mark_copied() -> CopiedShadow
341 static const int UnusedRegion = 0; // The region is not collected yet
342 static const int ShadowRegion = 1; // Stolen by an idle thread, and a shadow region is created for it
343 static const int FilledShadow = 2; // Its shadow region has been filled and ready to be copied back
344 static const int CopiedShadow = 3; // The data of the shadow region has been copied back
345 static const int NormalRegion = 4; // The region will be collected by the original parallel algorithm
346
347 // Mark the current region as normal or shadow to enter different processing paths
348 inline bool mark_normal();
349 inline bool mark_shadow();
350 // Mark the shadow region as filled and ready to be copied back
351 inline void mark_filled();
352 // Mark the shadow region as copied back to avoid double copying.
353 inline bool mark_copied();
354 // Special case: see the comment in PSParallelCompact::fill_and_update_shadow_region.
355 // Return to the normal path here
356 inline void shadow_to_normal();
357
358
359 int shadow_state() { return _shadow_state; }
360
361 private:
362 // The type used to represent object sizes within a region.
363 typedef uint region_sz_t;
364
365 // Constants for manipulating the _dc_and_los field, which holds both the
366 // destination count and live obj size. The live obj size lives at the
367 // least significant end so no masking is necessary when adding.
368 static const region_sz_t dc_shift; // Shift amount.
369 static const region_sz_t dc_mask; // Mask for destination count.
370 static const region_sz_t dc_one; // 1, shifted appropriately.
371 static const region_sz_t dc_claimed; // Region has been claimed.
372 static const region_sz_t dc_completed; // Region has been completed.
373 static const region_sz_t los_mask; // Mask for live obj size.
374
375 HeapWord* _destination;
376 size_t _source_region;
377 HeapWord* _partial_obj_addr;
378 region_sz_t _partial_obj_size;
379 region_sz_t volatile _dc_and_los;
380 bool volatile _blocks_filled;
381 int volatile _shadow_state;
382
383 #ifdef ASSERT
384 size_t _blocks_filled_count; // Number of block table fills.
385
386 // These enable optimizations that are only partially implemented. Use
387 // debug builds to prevent the code fragments from breaking.
388 HeapWord* _data_location;
389 HeapWord* _highest_ref;
390 #endif // #ifdef ASSERT
391
392 #ifdef ASSERT
393 public:
394 uint _pushed; // 0 until region is pushed onto a stack
395 private:
396 #endif
397 };
398
399 // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
400 // holds an offset, which is the amount of live data in the Region to the left
401 // of the first live object that starts in the Block.
612 Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
613 }
614
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  // Debug-only bookkeeping: atomically raise _highest_ref to addr.
  // Classic CAS-max loop: each failed cmpxchg returns the value another
  // thread installed; retry until the stored value is >= addr or our
  // cmpxchg succeeds.  Product builds compile this to a no-op.
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
  }
#endif // #ifdef ASSERT
}
624
inline bool ParallelCompactData::RegionData::claim()
{
  // Atomically claim the region by setting the destination-count bits to
  // dc_claimed, but only if _dc_and_los currently holds just the live obj
  // size (i.e. the destination count is zero and nobody has claimed it).
  // Returns true iff this thread performed the transition.
  const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
  const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
  return old == los;
}
631
632 inline bool ParallelCompactData::RegionData::mark_normal() {
633 return Atomic::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion, memory_order_relaxed) == UnusedRegion;
634 }
635
// Try to steal this region for the shadow path (UnusedRegion -> ShadowRegion).
// The initial plain read is a cheap early-out that skips the cmpxchg when the
// region is already taken; the cmpxchg still decides any race.
inline bool ParallelCompactData::RegionData::mark_shadow() {
  if (_shadow_state != UnusedRegion) return false;
  return Atomic::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion, memory_order_relaxed) == UnusedRegion;
}
640
// Transition ShadowRegion -> FilledShadow once the shadow region's data is
// ready to be copied back.  The caller owns the shadow region, so the
// transition cannot legitimately fail; the result is only checked in debug
// builds.  The cmpxchg is performed unconditionally (not inside the assert)
// so product builds still execute the store.
inline void ParallelCompactData::RegionData::mark_filled() {
  int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow, memory_order_relaxed);
  assert(old == ShadowRegion, "Fail to mark the region as filled");
}
645
// Transition FilledShadow -> CopiedShadow.  Returns true iff this thread won
// the race, guaranteeing the shadow data is copied back exactly once.
inline bool ParallelCompactData::RegionData::mark_copied() {
  return Atomic::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow, memory_order_relaxed) == FilledShadow;
}
649
650 void ParallelCompactData::RegionData::shadow_to_normal() {
651 int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion, memory_order_relaxed);
652 assert(old == ShadowRegion, "Fail to mark the region as finish");
653 }
654
// Return the RegionData for the given region index.  An index equal to
// region_count() is permitted so an end/one-past-the-last pointer can be
// formed.
inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}
661
// Map a RegionData pointer back to its index within the _region_data array
// (inverse of region(size_t)).
inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}
669
// Return the BlockData for the given block index.  Unlike region(size_t),
// an end index is not permitted here (strict <).
inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}
1218 size_t src_region_idx);
1219
1220 // Determine the next source region, set closure.source() to the start of the
1221 // new region, and return the region index. Parameter end_addr is the address one
1222 // beyond the end of source range just processed. If necessary, switch to a
1223 // new source space and set src_space_id (in-out parameter) and src_space_top
1224 // (out parameter) accordingly.
1225 static size_t next_src_region(MoveAndUpdateClosure& closure,
1226 SpaceId& src_space_id,
1227 HeapWord*& src_space_top,
1228 HeapWord* end_addr);
1229
1230 // Decrement the destination count for each non-empty source region in the
1231 // range [beg_region, region(region_align_up(end_addr))). If the destination
1232 // count for a region goes to 0 and it needs to be filled, enqueue it.
1233 static void decrement_destination_counts(ParCompactionManager* cm,
1234 SpaceId src_space_id,
1235 size_t beg_region,
1236 HeapWord* end_addr);
1237
1238 static void fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region);
1239 static void fill_and_update_region(ParCompactionManager* cm, size_t region);
1240
1241 static bool steal_unavailable_region(ParCompactionManager* cm, size_t& region_idx);
1242 static void fill_and_update_shadow_region(ParCompactionManager* cm, size_t region);
1243 // Copy the content of a shadow region back to its corresponding heap region
1244 static void copy_back(HeapWord* shadow_addr, HeapWord* region_addr);
1245 // Collect empty regions as shadow regions and initialize the
1246 // _next_shadow_region field for each compaction manager
1247 static void initialize_shadow_regions(uint parallel_gc_threads);
1248
1249 // Fill in the block table for the specified region.
1250 static void fill_blocks(size_t region_idx);
1251
1252 // Update the deferred objects in the space.
1253 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1254
1255 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1256 static ParallelCompactData& summary_data() { return _summary_data; }
1257
1258 // Reference Processing
1259 static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1260
1261 static STWGCTimer* gc_timer() { return &_gc_timer; }
1262
1263 // Return the SpaceId for the given address.
1264 static SpaceId space_id(HeapWord* addr);
1265
1266 // Time since last full gc (in milliseconds).
1267 static jlong millis_since_last_gc();
1274 static void print_region_ranges();
1275 static void print_dense_prefix_stats(const char* const algorithm,
1276 const SpaceId id,
1277 const bool maximum_compaction,
1278 HeapWord* const addr);
1279 static void summary_phase_msg(SpaceId dst_space_id,
1280 HeapWord* dst_beg, HeapWord* dst_end,
1281 SpaceId src_space_id,
1282 HeapWord* src_beg, HeapWord* src_end);
1283 #endif // #ifndef PRODUCT
1284
1285 #ifdef ASSERT
1286 // Sanity check the new location of a word in the heap.
1287 static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1288 // Verify that all the regions have been emptied.
1289 static void verify_complete(SpaceId space_id);
1290 #endif // #ifdef ASSERT
1291 };
1292
// Bitmap closure that copies live objects into a destination region and
// updates their interior oops and the start array.  The shadow-region
// variant (MoveAndUpdateShadowClosure) reuses this class, redirecting the
// actual writes via _offset: copy_destination() = _destination + _offset,
// with _offset == 0 here so data is written straight to the destination.
class MoveAndUpdateClosure: public ParMarkBitMapClosure {
  // Word budget for the region: distance from the region start to the
  // space's new_top, capped at one region (see the definition below).
  static inline size_t calculate_words_remaining(size_t region);
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              size_t region);

  // Accessors.
  HeapWord* destination() const { return _destination; }
  // Address actually written to; differs from destination() only when a
  // subclass sets a non-zero _offset (shadow regions).
  HeapWord* copy_destination() const { return _destination + _offset; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return either
  // full (if the closure is full) or incomplete. If the object will not fit,
  // return would_overflow.
  IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

  // Hook invoked when the region has been filled; overridden by the shadow
  // closure to handle shadow-specific completion.  Definition not visible in
  // this header.
  virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
                               PSParallelCompact::RegionData* region_ptr);

 protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

 protected:
  HeapWord* _destination;          // Next addr to be written.
  ObjectStartArray* const _start_array;
  size_t _offset;                  // Words between _destination and the write target.
};
1330
// Number of words that may be written into the destination region: the
// distance from the region start to the destination space's new_top,
// capped at one full region.
inline size_t MoveAndUpdateClosure::calculate_words_remaining(size_t region) {
  HeapWord* dest_addr = PSParallelCompact::summary_data().region_to_addr(region);
  PSParallelCompact::SpaceId dest_space_id = PSParallelCompact::space_id(dest_addr);
  HeapWord* new_top = PSParallelCompact::new_top(dest_space_id);
  // A region below new_top always has at least one word to fill.
  assert(dest_addr < new_top, "sanity");

  return MIN2(pointer_delta(new_top, dest_addr), ParallelCompactData::RegionSize);
}
1339
// Construct a closure whose destination is the start of region_idx and whose
// word budget is computed by calculate_words_remaining().  _offset is zero:
// data is written directly into the destination region.
inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t region_idx) :
  ParMarkBitMapClosure(bitmap, cm, calculate_words_remaining(region_idx)),
  _destination(PSParallelCompact::summary_data().region_to_addr(region_idx)),
  _start_array(PSParallelCompact::start_array(PSParallelCompact::space_id(_destination))),
  _offset(0) { }
1348
1349
inline void MoveAndUpdateClosure::update_state(size_t words)
{
  // Record that "words" words were consumed: shrink the remaining budget
  // (tracked by the base class) and advance both cursors in lock step.
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}
1356
// MoveAndUpdateClosure variant that writes into a shadow region instead of
// the real destination region; the data is copied back to the heap region
// later (see PSParallelCompact::copy_back).  The redirection is achieved by
// setting the inherited _offset to the word distance between the heap
// region and its shadow, so copy_destination() lands in the shadow region.
class MoveAndUpdateShadowClosure: public MoveAndUpdateClosure {
  // Word distance from the heap region at region_idx to the shadow region
  // at shadow_idx.
  inline size_t calculate_shadow_offset(size_t region_idx, size_t shadow_idx);
 public:
  inline MoveAndUpdateShadowClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                                    size_t region, size_t shadow);

  // Shadow-specific completion hook; presumably marks the shadow filled and
  // triggers the copy back — definition not visible in this header.
  virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
                               PSParallelCompact::RegionData* region_ptr);

 private:
  size_t _shadow;   // Index of the shadow region written by this closure.
};
1369
// Word distance between the heap region and the shadow region standing in
// for it; adding this to an address in the heap region yields the
// corresponding address in the shadow region.
// NOTE(review): pointer_delta() implies shadow_addr >= dest_addr, i.e.
// shadow regions appear at higher addresses than the regions they cover —
// confirm against how shadow regions are allocated.
inline size_t MoveAndUpdateShadowClosure::calculate_shadow_offset(size_t region_idx, size_t shadow_idx) {
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);
  HeapWord* shadow_addr = sd.region_to_addr(shadow_idx);
  return pointer_delta(shadow_addr, dest_addr);
}
1376
// Construct a closure for "region" that redirects its writes into "shadow".
// The base class computes the real destination and word budget for the heap
// region; _offset is then overwritten so copy_destination() points into the
// shadow region instead.
inline
MoveAndUpdateShadowClosure::MoveAndUpdateShadowClosure(ParMarkBitMap *bitmap,
                                                       ParCompactionManager *cm,
                                                       size_t region,
                                                       size_t shadow) :
  MoveAndUpdateClosure(bitmap, cm, region),
  _shadow(shadow) {
  _offset = calculate_shadow_offset(region, shadow);
}
1386
// Bitmap closure that updates interior oops of live objects in place,
// without moving the objects themselves.
class UpdateOnlyClosure: public ParMarkBitMapClosure {
 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const _start_array;

 public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  // Convenience overload; presumably forwards to the two-argument form —
  // definition not visible here.
  inline void do_addr(HeapWord* addr);
};
1402
1403 class FillClosure: public ParMarkBitMapClosure {
1404 public:
|