src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp


Old version:

 330     region_sz_t volatile _dc_and_los;
 331 #ifdef ASSERT
 332     // These enable optimizations that are only partially implemented.  Use
 333     // debug builds to prevent the code fragments from breaking.
 334     HeapWord*            _data_location;
 335     HeapWord*            _highest_ref;
 336 #endif  // #ifdef ASSERT
 337 
 338 #ifdef ASSERT
 339    public:
 340     uint            _pushed;   // 0 until region is pushed onto a worker's stack
 341    private:
 342 #endif
 343   };
 344 
 345 public:
 346   ParallelCompactData();
 347   bool initialize(MemRegion covered_region);
 348 
 349   size_t region_count() const { return _region_count; }
 350 
 351   // Convert region indices to/from RegionData pointers.
 352   inline RegionData* region(size_t region_idx) const;
 353   inline size_t     region(const RegionData* const region_ptr) const;
 354 
 355   // Returns true if the given address is contained within the region
 356   bool region_contains(size_t region_index, HeapWord* addr);
 357 
 358   void add_obj(HeapWord* addr, size_t len);
 359   void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
 360 
 361   // Fill in the regions covering [beg, end) so that no data moves; i.e., the
 362   // destination of region n is simply the start of region n.  The argument beg
 363   // must be region-aligned; end need not be.
 364   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
 365 
 366   HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
 367                                   HeapWord* destination, HeapWord* target_end,
 368                                   HeapWord** target_next);
 369   bool summarize(SplitInfo& split_info,


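An aside on the summarize_dense_prefix declaration above: the comment describes an identity mapping, so every region covering [beg, end) keeps its own start address as its destination. A minimal, self-contained sketch of that idea in a simplified model, not the HotSpot types (Region, RegionSizeWords and the word-index arithmetic below are invented for illustration):

// Simplified model of the "no data moves" summary described above; the
// region size and Region struct are invented for this sketch and are not
// the HotSpot types.
#include <cassert>
#include <cstddef>
#include <vector>

static const size_t RegionSizeWords = 512;   // hypothetical words per region

struct Region {
  size_t destination_word;   // where the region's data starts after compaction
};

// beg_word must be region-aligned; end_word need not be, mirroring the
// comment on summarize_dense_prefix.
void summarize_dense_prefix(std::vector<Region>& regions,
                            size_t beg_word, size_t end_word) {
  assert(beg_word % RegionSizeWords == 0 && "beg must be region-aligned");
  for (size_t idx = beg_word / RegionSizeWords;
       idx * RegionSizeWords < end_word; ++idx) {
    regions[idx].destination_word = idx * RegionSizeWords;   // region does not move
  }
}

The real method naturally does more bookkeeping than this; the sketch only shows the destination assignment that the comment describes.
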
 403   HeapWord* calc_new_pointer(oop p) {
 404     return calc_new_pointer((HeapWord*) p);
 405   }
 406 
 407 #ifdef  ASSERT
 408   void verify_clear(const PSVirtualSpace* vspace);
 409   void verify_clear();
 410 #endif  // #ifdef ASSERT
 411 
 412 private:
 413   bool initialize_region_data(size_t region_size);
 414   PSVirtualSpace* create_vspace(size_t count, size_t element_size);
 415 
 416 private:
 417   HeapWord*       _region_start;
 418 #ifdef  ASSERT
 419   HeapWord*       _region_end;
 420 #endif  // #ifdef ASSERT
 421 
 422   PSVirtualSpace* _region_vspace;
 423   RegionData*     _region_data;
 424   size_t          _region_count;
 425 };
 426 
 427 inline uint
 428 ParallelCompactData::RegionData::destination_count_raw() const
 429 {
 430   return _dc_and_los & dc_mask;
 431 }
 432 
 433 inline uint
 434 ParallelCompactData::RegionData::destination_count() const
 435 {
 436   return destination_count_raw() >> dc_shift;
 437 }
 438 
 439 inline void
 440 ParallelCompactData::RegionData::set_destination_count(uint count)
 441 {
 442   assert(count <= (dc_completed >> dc_shift), "count too large");
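
An aside on the accessors above: _dc_and_los apparently packs a destination count into the high bits alongside the region's live-object size (the "los" in the name), which is why destination_count() first masks with dc_mask and then shifts by dc_shift. A standalone illustration of that style of packing, with bit positions invented for the sketch rather than taken from HotSpot's dc_shift/dc_mask:

// Standalone illustration only: a counter packed above a size field in one
// 32-bit word.  The shift/mask values are invented for this sketch, not
// HotSpot's real dc_shift/dc_mask.
#include <cassert>
#include <cstdint>

typedef uint32_t region_sz_t;

static const uint32_t    dc_shift = 27;                          // hypothetical
static const region_sz_t dc_mask  = ~region_sz_t(0) << dc_shift; // high bits: destination count
static const region_sz_t los_mask = ~dc_mask;                    // low bits: live-object size

inline region_sz_t pack(uint32_t count, uint32_t live_obj_size) {
  assert(live_obj_size <= los_mask && "live size too large");
  return (region_sz_t(count) << dc_shift) | live_obj_size;
}

inline uint32_t destination_count_raw(region_sz_t dc_and_los) {
  return dc_and_los & dc_mask;          // count, still shifted into the high bits
}

inline uint32_t destination_count(region_sz_t dc_and_los) {
  return destination_count_raw(dc_and_los) >> dc_shift;
}

inline uint32_t live_obj_size(region_sz_t dc_and_los) {
  return dc_and_los & los_mask;
}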

New version (adds _reserved_byte_size and its reserved_byte_size() accessor):

 330     region_sz_t volatile _dc_and_los;
 331 #ifdef ASSERT
 332     // These enable optimizations that are only partially implemented.  Use
 333     // debug builds to prevent the code fragments from breaking.
 334     HeapWord*            _data_location;
 335     HeapWord*            _highest_ref;
 336 #endif  // #ifdef ASSERT
 337 
 338 #ifdef ASSERT
 339    public:
 340     uint            _pushed;   // 0 until region is pushed onto a worker's stack
 341    private:
 342 #endif
 343   };
 344 
 345 public:
 346   ParallelCompactData();
 347   bool initialize(MemRegion covered_region);
 348 
 349   size_t region_count() const { return _region_count; }
 350   size_t reserved_byte_size() const { return _reserved_byte_size; }
 351 
 352   // Convert region indices to/from RegionData pointers.
 353   inline RegionData* region(size_t region_idx) const;
 354   inline size_t     region(const RegionData* const region_ptr) const;
 355 
 356   // Returns true if the given address is contained within the region
 357   bool region_contains(size_t region_index, HeapWord* addr);
 358 
 359   void add_obj(HeapWord* addr, size_t len);
 360   void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
 361 
 362   // Fill in the regions covering [beg, end) so that no data moves; i.e., the
 363   // destination of region n is simply the start of region n.  The argument beg
 364   // must be region-aligned; end need not be.
 365   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
 366 
 367   HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
 368                                   HeapWord* destination, HeapWord* target_end,
 369                                   HeapWord** target_next);
 370   bool summarize(SplitInfo& split_info,


 404   HeapWord* calc_new_pointer(oop p) {
 405     return calc_new_pointer((HeapWord*) p);
 406   }
 407 
 408 #ifdef  ASSERT
 409   void verify_clear(const PSVirtualSpace* vspace);
 410   void verify_clear();
 411 #endif  // #ifdef ASSERT
 412 
 413 private:
 414   bool initialize_region_data(size_t region_size);
 415   PSVirtualSpace* create_vspace(size_t count, size_t element_size);
 416 
 417 private:
 418   HeapWord*       _region_start;
 419 #ifdef  ASSERT
 420   HeapWord*       _region_end;
 421 #endif  // #ifdef ASSERT
 422 
 423   PSVirtualSpace* _region_vspace;
 424   size_t          _reserved_byte_size;
 425   RegionData*     _region_data;
 426   size_t          _region_count;
 427 };
 428 
 429 inline uint
 430 ParallelCompactData::RegionData::destination_count_raw() const
 431 {
 432   return _dc_and_los & dc_mask;
 433 }
 434 
 435 inline uint
 436 ParallelCompactData::RegionData::destination_count() const
 437 {
 438   return destination_count_raw() >> dc_shift;
 439 }
 440 
 441 inline void
 442 ParallelCompactData::RegionData::set_destination_count(uint count)
 443 {
 444   assert(count <= (dc_completed >> dc_shift), "count too large");
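
What the new version adds, then, is the _reserved_byte_size field next to _region_vspace and the read-only reserved_byte_size() accessor; from these declarations alone it appears to record how many bytes were reserved for the region data's backing virtual space. A hypothetical use (not taken from this patch), given a ParallelCompactData& named data:

  size_t region_data_bytes = data.reserved_byte_size();  // e.g. for logging the reservation size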