src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp

 175   // Linear allocation blocks
 176   LinearAllocBlock _smallLinearAllocBlock;
 177 
 178   AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 179 
 180   // Indexed array for small size blocks
 181   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
 182 
 183   // Allocation strategy
 184   bool _fitStrategy;  // Use best fit strategy
 185 
 186   // This is an address close to the largest free chunk in the heap.
 187   // It is currently assumed to be at the end of the heap.  Free
 188   // chunks with addresses greater than nearLargestChunk are coalesced
 189   // in an effort to maintain a large chunk at the end of the heap.
 190   HeapWord*  _nearLargestChunk;
 191 
 192   // Used to keep track of limit of sweep for the space
 193   HeapWord* _sweep_limit;
 194 
 195   // Used to make the young collector update the mod union table
 196   MemRegionClosure* _preconsumptionDirtyCardClosure;
 197 
 198   // Support for compacting cms
 199   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 200   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
 201 
 202   // Initialization helpers.
 203   void initializeIndexedFreeListArray();
 204 
 205   // Extra stuff to manage promotion parallelism.
 206 
 207   // A lock protecting the dictionary during par promotion allocation.
 208   mutable Mutex _parDictionaryAllocLock;
 209   Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 210 
 211   // Locks protecting the exact lists during par promotion allocation.
 212   Mutex* _indexedFreeListParLocks[IndexSetSize];
 213 
 214   // Attempt to obtain up to "n" blocks of the size "word_sz" (which is


 395   void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
 396     _preconsumptionDirtyCardClosure = cl;
 397   }
 398 
 399   // Space enquiries
 400   size_t used() const;
 401   size_t free() const;
 402   size_t max_alloc_in_words() const;
 403   // XXX: should have a less conservative used_region() than that of
 404   // Space; we could consider keeping track of highest allocated
 405   // address and correcting that at each sweep, as the sweeper
 406   // goes through the entire allocated part of the generation. We
 407   // could also use that information to keep the sweeper from
 408   // sweeping more than is necessary. The allocator and sweeper will
 409   // of course need to synchronize on this, since the sweeper will
 410   // try to bump down the address and the allocator will try to bump it up.
 411   // For now, however, we'll just use the default used_region()
 412   // which overestimates the region by returning the entire
 413   // committed region (this is safe, but inefficient).
 414 
 415   // Returns a subregion of the space containing all the objects in
 416   // the space.
 417   MemRegion used_region() const {
 418     return MemRegion(bottom(),
 419                      BlockOffsetArrayUseUnallocatedBlock ?
 420                      unallocated_block() : end());
 421   }
 422 
 423   virtual bool is_free_block(const HeapWord* p) const;
 424 
 425   // Resizing support
 426   void set_end(HeapWord* value);  // override
 427 
 428   // Never mangle CompactibleFreeListSpace
 429   void mangle_unused_area() {}
 430   void mangle_unused_area_complete() {}
 431 
 432   // Mutual exclusion support
 433   Mutex* freelistLock() const { return &_freelistLock; }
 434 




 175   // Linear allocation blocks
 176   LinearAllocBlock _smallLinearAllocBlock;
 177 
 178   AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 179 
 180   // Indexed array for small size blocks
 181   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
 182 
 183   // Allocation strategy
 184   bool _fitStrategy;  // Use best fit strategy
 185 
 186   // This is an address close to the largest free chunk in the heap.
 187   // It is currently assumed to be at the end of the heap.  Free
 188   // chunks with addresses greater than nearLargestChunk are coalesced
 189   // in an effort to maintain a large chunk at the end of the heap.
 190   HeapWord*  _nearLargestChunk;
 191 
 192   // Used to keep track of limit of sweep for the space
 193   HeapWord* _sweep_limit;
 194 
 195   // Stable value of used().
 196   size_t _used_stable;
 197 
 198   // Used to make the young collector update the mod union table
 199   MemRegionClosure* _preconsumptionDirtyCardClosure;
 200 
 201   // Support for compacting cms
 202   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 203   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
 204 
 205   // Initialization helpers.
 206   void initializeIndexedFreeListArray();
 207 
 208   // Extra stuff to manage promotion parallelism.
 209 
 210   // A lock protecting the dictionary during par promotion allocation.
 211   mutable Mutex _parDictionaryAllocLock;
 212   Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 213 
 214   // Locks protecting the exact lists during par promotion allocation.
 215   Mutex* _indexedFreeListParLocks[IndexSetSize];
 216 
 217   // Attempt to obtain up to "n" blocks of the size "word_sz" (which is

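For orientation, here is a standalone C++ sketch of the layout the fields above describe: requests below IndexSetSize are served from a per-size indexed free list under that list's own lock, while larger requests go to a single size-ordered dictionary under one lock (the role played by _parDictionaryAllocLock). This is illustrative only, not HotSpot code: the class and helper names are invented, 257 merely stands in for IndexSetSize, and std::vector/std::multimap stand in for AdaptiveFreeList and AFLBinaryTreeDictionary.

  #include <cstddef>
  #include <map>
  #include <mutex>
  #include <vector>

  class ToyFreeListSpace {
    static constexpr std::size_t IndexSetSize = 257;   // stand-in value

    // One list and one lock per exact small size, mirroring _indexedFreeList
    // and _indexedFreeListParLocks above.
    std::vector<void*> _indexedFreeList[IndexSetSize];
    std::mutex         _indexedFreeListParLocks[IndexSetSize];

    // Single size-ordered structure for everything else, mirroring _dictionary,
    // guarded by the analogue of _parDictionaryAllocLock.
    std::multimap<std::size_t, void*> _dictionary;
    std::mutex                        _parDictionaryAllocLock;

   public:
    // Parallel (promotion-style) allocation: the request size picks both the
    // data structure and the lock that protects it.
    void* par_allocate(std::size_t word_sz) {
      if (word_sz < IndexSetSize) {
        std::lock_guard<std::mutex> g(_indexedFreeListParLocks[word_sz]);
        std::vector<void*>& list = _indexedFreeList[word_sz];
        if (list.empty()) return nullptr;   // the real code would try to split a larger chunk
        void* chunk = list.back();
        list.pop_back();
        return chunk;
      }
      std::lock_guard<std::mutex> g(_parDictionaryAllocLock);
      auto it = _dictionary.lower_bound(word_sz);   // smallest chunk that still fits
      if (it == _dictionary.end()) return nullptr;
      void* chunk = it->second;
      _dictionary.erase(it);
      return chunk;
    }
  };

The sketch only mirrors the locking split; replenishing an empty indexed list from the dictionary, splitting chunks, and the best-fit policy hinted at by _fitStrategy are all omitted.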

 398   void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
 399     _preconsumptionDirtyCardClosure = cl;
 400   }
 401 
 402   // Space enquiries
 403   size_t used() const;
 404   size_t free() const;
 405   size_t max_alloc_in_words() const;
 406   // XXX: should have a less conservative used_region() than that of
 407   // Space; we could consider keeping track of highest allocated
 408   // address and correcting that at each sweep, as the sweeper
 409   // goes through the entire allocated part of the generation. We
 410   // could also use that information to keep the sweeper from
 411   // sweeping more than is necessary. The allocator and sweeper will
 412   // of course need to synchronize on this, since the sweeper will
 413   // try to bump down the address and the allocator will try to bump it up.
 414   // For now, however, we'll just use the default used_region()
 415   // which overestimates the region by returning the entire
 416   // committed region (this is safe, but inefficient).
 417 
 418   // Returns monotonically increasing stable used space bytes for CMS.
 419   // This is required for jstat and other memory monitoring tools
 420   // that might otherwise see inconsistent used space values during a garbage 
 421   // collection, promotion or allocation into compactibleFreeListSpace.
 422   // The value returned by this function might be smaller than the
 423   // actual value.
 424   size_t used_stable() const;
 425   // Recalculate and cache the current stable used() value. Only to be called
 426   // in places where we can be sure that the result is stable.
 427   void recalculate_used_stable();
 428 
 429   // Returns a subregion of the space containing all the objects in
 430   // the space.
 431   MemRegion used_region() const {
 432     return MemRegion(bottom(),
 433                      BlockOffsetArrayUseUnallocatedBlock ?
 434                      unallocated_block() : end());
 435   }
 436 
 437   virtual bool is_free_block(const HeapWord* p) const;
 438 
 439   // Resizing support
 440   void set_end(HeapWord* value);  // override
 441 
 442   // Never mangle CompactibleFreeListSpace
 443   void mangle_unused_area() {}
 444   void mangle_unused_area_complete() {}
 445 
 446   // Mutual exclusion support
 447   Mutex* freelistLock() const { return &_freelistLock; }
 448
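
The used_stable()/recalculate_used_stable() pair declared above is, in effect, a cached snapshot of used(): monitoring callers read the cached value, and the collector refreshes it only at points where used() is known to be consistent (hence "might be smaller than the actual value"). A minimal standalone sketch of that caching pattern follows; it is not the CMS implementation, the names other than the two accessors are invented, and std::atomic is used only so the standalone example is well-defined, not to imply anything about how the real _used_stable field is synchronized.

  #include <atomic>
  #include <cstddef>

  class ToySpace {
    std::atomic<std::size_t> _used_stable{0};

    // Stand-in for the real used(), which walks free-list metadata and is
    // only meaningful while allocation and sweeping are quiescent.
    std::size_t used() const { return 0; }

   public:
    // Cheap, always-consistent view for monitoring; may lag the true value.
    std::size_t used_stable() const {
      return _used_stable.load(std::memory_order_relaxed);
    }

    // Called only where used() is known to be stable, e.g. at the end of a
    // collection pause or after a sweep completes.
    void recalculate_used_stable() {
      _used_stable.store(used(), std::memory_order_relaxed);
    }
  };

The property the comment above asks for, that a reader never observes a value computed mid-sweep or mid-promotion, comes entirely from where recalculate_used_stable() is called, not from anything in the read path.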