
src/share/vm/gc/cms/compactibleFreeListSpace.hpp

--- old/src/share/vm/gc/cms/compactibleFreeListSpace.hpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
  26 #define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
  27 
  28 #include "gc/cms/adaptiveFreeList.hpp"
  29 #include "gc/cms/promotionInfo.hpp"
  30 #include "gc/shared/blockOffsetTable.hpp"
  31 #include "gc/shared/space.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/freeList.hpp"
  34 
  35 // Classes in support of keeping track of promotions into a non-Contiguous
  36 // space, in this case a CompactibleFreeListSpace.
  37 
  38 // Forward declarations
  39 class CMSCollector;
  40 class CompactibleFreeListSpace;
  41 class ConcurrentMarkSweepGeneration;
  42 class BlkClosure;
  43 class BlkClosureCareful;
  44 class FreeChunk;
  45 class UpwardsObjectClosure;
  46 class ObjectClosureCareful;
  47 class Klass;
  48 
  49 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
  50  public:
  51   LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),


 258   // Take any locks as appropriate if we are multithreaded.
 259   void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
 260   // Add a free chunk to the indexed free lists.
 261   void       returnChunkToFreeList(FreeChunk* chunk);
 262   // Add a free chunk to the dictionary.
 263   void       returnChunkToDictionary(FreeChunk* chunk);
 264 
 265   // Functions for maintaining the linear allocation buffers (LinAB).
 266   // Repairing a linear allocation block refers to operations
 267   // performed on the remainder of a LinAB after an allocation
 268   // has been made from it.
 269   void       repairLinearAllocationBlocks();
 270   void       repairLinearAllocBlock(LinearAllocBlock* blk);
 271   void       refillLinearAllocBlock(LinearAllocBlock* blk);
 272   void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
 273   void       refillLinearAllocBlocksIfNeeded();
 274 
 275   void       verify_objects_initialized() const;
 276 
 277   // Statistics reporting helper functions
 278   void       reportFreeListStatistics() const;
 279   void       reportIndexedFreeListStatistics() const;
 280   size_t     maxChunkSizeInIndexedFreeLists() const;
 281   size_t     numFreeBlocksInIndexedFreeLists() const;
 282   // Accessor
 283   HeapWord* unallocated_block() const {
 284     if (BlockOffsetArrayUseUnallocatedBlock) {
 285       HeapWord* ub = _bt.unallocated_block();
 286       assert(ub >= bottom() &&
 287              ub <= end(), "space invariant");
 288       return ub;
 289     } else {
 290       return end();
 291     }
 292   }
 293   void freed(HeapWord* start, size_t size) {
 294     _bt.freed(start, size);
 295   }
 296 
 297   // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
 298   // See comments for CompactibleSpace for more information.
 299   inline HeapWord* scan_limit() const {
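
The returnChunkToFreeList / returnChunkToDictionary split above reflects CMS's two-tier free storage: small chunks live on per-size indexed free lists, larger ones in a tree dictionary (hence the binaryTreeDictionary.hpp include). A minimal standalone sketch of that segregated idea; the names are hypothetical, a std::multimap stands in for the binary tree dictionary, and the threshold value is only illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <vector>

    // Illustrative threshold: chunks below this size (in words) use the
    // indexed per-size lists; larger chunks go to the dictionary.
    static const std::size_t kIndexSetSize = 257;

    struct SegregatedFreeStore {
      // One list per exact small size: constant-time return and removal.
      std::vector<void*> indexed[kIndexSetSize];
      // Large chunks keyed by size; HotSpot uses a binary tree dictionary,
      // a std::multimap is merely the stand-in here.
      std::multimap<std::size_t, void*> dictionary;

      // Analogue of returnChunkToFreeList / returnChunkToDictionary.
      void return_chunk(void* chunk, std::size_t size_in_words) {
        if (size_in_words < kIndexSetSize) {
          indexed[size_in_words].push_back(chunk);
        } else {
          dictionary.insert(std::make_pair(size_in_words, chunk));
        }
      }
    };

    int main() {
      SegregatedFreeStore fs;
      char small_chunk[16], big_chunk[4096];
      fs.return_chunk(small_chunk, 2);   // small: indexed list for size 2
      fs.return_chunk(big_chunk, 512);   // large: dictionary
      std::printf("indexed[2]: %zu, dictionary: %zu\n",
                  fs.indexed[2].size(), fs.dictionary.size());
      return 0;
    }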

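Likewise, the LinAB comment above ("repairing ... the remainder of a LinAB after an allocation") describes a bump-pointer buffer whose unused tail must be handed back so the space remains parseable as a sequence of blocks. A minimal sketch of the allocate/refill/repair cycle, with hypothetical names and malloc/free standing in for the free lists:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Minimal linear allocation block: bump-pointer allocation from a
    // buffer, with "repair" returning the unused tail. In CMS the refill
    // comes from (and the remainder returns to) the free lists.
    struct LinAB {
      char*       base = nullptr;  // start of current buffer
      char*       ptr  = nullptr;  // next free byte (_ptr analogue)
      std::size_t remaining = 0;   // bytes left (_word_size analogue)
      std::size_t refill_size;

      explicit LinAB(std::size_t rs) : refill_size(rs) {}

      // repairLinearAllocBlock analogue: give the unused remainder back.
      void repair() {
        if (remaining > 0) {
          std::printf("returning %zu-byte remainder at %p to free lists\n",
                      remaining, static_cast<void*>(ptr));
        }
        std::free(base);  // sketch only; CMS returns the tail, not the buffer
        base = ptr = nullptr;
        remaining = 0;
      }

      // refillLinearAllocBlockIfNeeded analogue.
      void refill_if_needed(std::size_t want) {
        if (want > remaining) {
          repair();
          base = ptr = static_cast<char*>(std::malloc(refill_size));
          remaining = (base != nullptr) ? refill_size : 0;
        }
      }

      void* allocate(std::size_t n) {
        refill_if_needed(n);
        if (n > remaining) return nullptr;
        void* res = ptr;
        ptr += n;
        remaining -= n;
        return res;
      }
    };

    int main() {
      LinAB lab(128);
      void* a = lab.allocate(24);
      void* b = lab.allocate(40);
      std::printf("a=%p b=%p\n", a, b);
      lab.repair();  // the 64 bytes left in the buffer go "back"
      return 0;
    }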

 433 
 434   void blk_iterate(BlkClosure* cl);
 435   void blk_iterate_careful(BlkClosureCareful* cl);
 436   HeapWord* block_start_const(const void* p) const;
 437   HeapWord* block_start_careful(const void* p) const;
 438   size_t block_size(const HeapWord* p) const;
 439   size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
 440   bool block_is_obj(const HeapWord* p) const;
 441   bool obj_is_alive(const HeapWord* p) const;
 442   size_t block_size_nopar(const HeapWord* p) const;
 443   bool block_is_obj_nopar(const HeapWord* p) const;
 444 
 445   // Iteration support for promotion
 446   void save_marks();
 447   bool no_allocs_since_save_marks();
 448 
 449   // Iteration support for sweeping
 450   void save_sweep_limit() {
 451     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
 452                    unallocated_block() : end();
 453     if (CMSTraceSweeper) {
 454       gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
 455                              "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
 456                              p2i(_sweep_limit), p2i(bottom()), p2i(end()));
 457     }
 458   }
 459   NOT_PRODUCT(
 460     void clear_sweep_limit() { _sweep_limit = NULL; }
 461   )
 462   HeapWord* sweep_limit() { return _sweep_limit; }
 463 
 464   // Apply "blk->do_oop" to the addresses of all reference fields in objects
 465   // promoted into this generation since the most recent save_marks() call.
 466   // Fields in objects allocated by applications of the closure
 467   // *are* included in the iteration. Thus, when the iteration completes
 468   // there should be no further such objects remaining.
 469   #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
 470     void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
 471   ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
 472   #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
 473 
 474   // Allocation support
 475   HeapWord* allocate(size_t size);
 476   HeapWord* par_allocate(size_t size);
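
The CFLS_OOP_SINCE_SAVE_MARKS_DECL block above uses HotSpot's X-macro idiom: ALL_SINCE_SAVE_MARKS_CLOSURES expands the given macro once per closure type, stamping out one oop_since_save_marks_iterate##nv_suffix declaration per (type, suffix) pair, and the definition site expands the same list so the two can never drift apart. A minimal standalone sketch of the idiom, with hypothetical closure names:

    #include <cstdio>

    // Hypothetical closure types standing in for HotSpot's OopClosure family.
    struct ScanClosure   { void do_oop(void** p) { std::printf("scan %p\n",   (void*)p); } };
    struct FilterClosure { void do_oop(void** p) { std::printf("filter %p\n", (void*)p); } };

    // The X-macro: lists every (ClosureType, suffix) pair exactly once.
    #define ALL_CLOSURES(f)  \
      f(ScanClosure, _s)     \
      f(FilterClosure, _f)

    struct Space {
      // Declaration site, mirroring CFLS_OOP_SINCE_SAVE_MARKS_DECL.
      #define DECL(OopClosureType, nv_suffix) \
        void oop_iterate##nv_suffix(OopClosureType* blk);
      ALL_CLOSURES(DECL)
      #undef DECL
    };

    // Definition site expands the same list, keeping both in sync.
    #define DEFN(OopClosureType, nv_suffix)                     \
      void Space::oop_iterate##nv_suffix(OopClosureType* blk) { \
        void* slot = nullptr;                                   \
        blk->do_oop(&slot);                                     \
      }
    ALL_CLOSURES(DEFN)
    #undef DEFN

    int main() {
      Space sp;
      ScanClosure sc;
      sp.oop_iterate_s(&sc);
      return 0;
    }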
 477 

+++ new/src/share/vm/gc/cms/compactibleFreeListSpace.hpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
  26 #define SHARE_VM_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
  27 
  28 #include "gc/cms/adaptiveFreeList.hpp"
  29 #include "gc/cms/promotionInfo.hpp"
  30 #include "gc/shared/blockOffsetTable.hpp"
  31 #include "gc/shared/space.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/binaryTreeDictionary.hpp"
  34 #include "memory/freeList.hpp"
  35 
  36 // Classes in support of keeping track of promotions into a non-Contiguous
  37 // space, in this case a CompactibleFreeListSpace.
  38 
  39 // Forward declarations
  40 class CMSCollector;
  41 class CompactibleFreeListSpace;
  42 class ConcurrentMarkSweepGeneration;
  43 class BlkClosure;
  44 class BlkClosureCareful;
  45 class FreeChunk;
  46 class UpwardsObjectClosure;
  47 class ObjectClosureCareful;
  48 class Klass;
  49 
  50 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
  51  public:
  52   LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),


 259   // Take any locks as appropriate if we are multithreaded.
 260   void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
 261   // Add a free chunk to the indexed free lists.
 262   void       returnChunkToFreeList(FreeChunk* chunk);
 263   // Add a free chunk to the dictionary.
 264   void       returnChunkToDictionary(FreeChunk* chunk);
 265 
 266   // Functions for maintaining the linear allocation buffers (LinAB).
 267   // Repairing a linear allocation block refers to operations
 268   // performed on the remainder of a LinAB after an allocation
 269   // has been made from it.
 270   void       repairLinearAllocationBlocks();
 271   void       repairLinearAllocBlock(LinearAllocBlock* blk);
 272   void       refillLinearAllocBlock(LinearAllocBlock* blk);
 273   void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
 274   void       refillLinearAllocBlocksIfNeeded();
 275 
 276   void       verify_objects_initialized() const;
 277 
 278   // Statistics reporting helper functions
 279   void       reportFreeListStatistics(const char* title) const;
 280   void       reportIndexedFreeListStatistics(outputStream* st) const;
 281   size_t     maxChunkSizeInIndexedFreeLists() const;
 282   size_t     numFreeBlocksInIndexedFreeLists() const;
 283   // Accessor
 284   HeapWord* unallocated_block() const {
 285     if (BlockOffsetArrayUseUnallocatedBlock) {
 286       HeapWord* ub = _bt.unallocated_block();
 287       assert(ub >= bottom() &&
 288              ub <= end(), "space invariant");
 289       return ub;
 290     } else {
 291       return end();
 292     }
 293   }
 294   void freed(HeapWord* start, size_t size) {
 295     _bt.freed(start, size);
 296   }
 297 
 298   // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
 299   // See comments for CompactibleSpace for more information.
 300   inline HeapWord* scan_limit() const {
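
One substantive API change in this hunk: relative to the old version, reportFreeListStatistics now takes a title and reportIndexedFreeListStatistics an explicit outputStream*, so the caller chooses the heading and the sink instead of the helpers printing to an implicit global stream. A minimal sketch of that parameterized-sink shape, with hypothetical names:

    #include <cstdio>

    // Hypothetical minimal stand-in for HotSpot's outputStream.
    struct OutStream {
      void print_cr(const char* line) { std::puts(line); }
    };

    // New shape: heading and sink are parameters, so the same reporting
    // code can target the tty, a log stream, or a buffer under test.
    void report_free_list_statistics(OutStream* st, const char* title) {
      st->print_cr(title);
      st->print_cr("  (per-size free list statistics would follow)");
    }

    int main() {
      OutStream tty_like;
      report_free_list_statistics(&tty_like, "After GC:");
      return 0;
    }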


 434 
 435   void blk_iterate(BlkClosure* cl);
 436   void blk_iterate_careful(BlkClosureCareful* cl);
 437   HeapWord* block_start_const(const void* p) const;
 438   HeapWord* block_start_careful(const void* p) const;
 439   size_t block_size(const HeapWord* p) const;
 440   size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
 441   bool block_is_obj(const HeapWord* p) const;
 442   bool obj_is_alive(const HeapWord* p) const;
 443   size_t block_size_nopar(const HeapWord* p) const;
 444   bool block_is_obj_nopar(const HeapWord* p) const;
 445 
 446   // Iteration support for promotion
 447   void save_marks();
 448   bool no_allocs_since_save_marks();
 449 
 450   // Iteration support for sweeping
 451   void save_sweep_limit() {
 452     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
 453                    unallocated_block() : end();
 454     log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
 455                                  "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
 456                                  p2i(_sweep_limit), p2i(bottom()), p2i(end()));
 457   }
 458   NOT_PRODUCT(
 459     void clear_sweep_limit() { _sweep_limit = NULL; }
 460   )
 461   HeapWord* sweep_limit() { return _sweep_limit; }
 462 
 463   // Apply "blk->do_oop" to the addresses of all reference fields in objects
 464   // promoted into this generation since the most recent save_marks() call.
 465   // Fields in objects allocated by applications of the closure
 466   // *are* included in the iteration. Thus, when the iteration completes
 467   // there should be no further such objects remaining.
 468   #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
 469     void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
 470   ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
 471   #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
 472 
 473   // Allocation support
 474   HeapWord* allocate(size_t size);
 475   HeapWord* par_allocate(size_t size);
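
The save_sweep_limit diff above is the heart of this change: the flag-guarded tracing (if (CMSTraceSweeper) { gclog_or_tty->print_cr(...); }) becomes a single log_develop_trace(gc, sweep)(...) call, with the enable/disable decision moved into the unified logging configuration. A minimal standalone sketch of that guard-inside-the-macro pattern; the macro and flag here are hypothetical stand-ins, not the logging/log.hpp implementation:

    #include <cstdarg>
    #include <cstdio>

    // Hypothetical stand-in for the log configuration check that HotSpot
    // resolves against -Xlog settings for the (gc, sweep) tag set.
    static bool gc_sweep_trace_enabled = true;

    static void log_line(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      std::vfprintf(stderr, fmt, ap);
      std::fputc('\n', stderr);
      va_end(ap);
    }

    // The macro folds the old explicit 'if (CMSTraceSweeper)' guard into
    // the logging call itself: when disabled, nothing is even formatted.
    #define LOG_DEVELOP_TRACE(...)                          \
      do {                                                  \
        if (gc_sweep_trace_enabled) log_line(__VA_ARGS__);  \
      } while (0)

    int main() {
      void* sweep_limit = nullptr;
      LOG_DEVELOP_TRACE(">>>>> Saving sweep limit %p <<<<<<", sweep_limit);
      return 0;
    }

In HotSpot the develop variants are additionally compiled out of product builds, a detail this sketch does not model.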
 476 

