
src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp

 416   // the space.
 417   MemRegion used_region() const {
 418     return MemRegion(bottom(),
 419                      BlockOffsetArrayUseUnallocatedBlock ?
 420                      unallocated_block() : end());
 421   }
 422 
 423   virtual bool is_free_block(const HeapWord* p) const;
 424 
 425   // Resizing support
 426   void set_end(HeapWord* value);  // override
 427 
 428   // Never mangle CompactibleFreeListSpace
 429   void mangle_unused_area() {}
 430   void mangle_unused_area_complete() {}
 431 
 432   // Mutual exclusion support
 433   Mutex* freelistLock() const { return &_freelistLock; }
 434 
 435   // Iteration support
 436   void oop_iterate(ExtendedOopClosure* cl);
 437 
 438   void object_iterate(ObjectClosure* blk);
 439   // Apply the closure to each object in the space whose references
 440   // point to objects in the heap.  The usage of CompactibleFreeListSpace
 441   // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
 442   // objects in the space to hold references to objects that are no
 443   // longer valid.  For example, an object may reference another object
 444   // that has already been swept up (collected).  This method uses
 445   // obj_is_alive() to determine whether it is safe to iterate over
 446   // an object.
 447   void safe_object_iterate(ObjectClosure* blk);
 448 
 449   // Iterate over all objects that intersect with mr, calling "cl->do_object"
 450   // on each.  There is an exception to this: if this closure has already
 451   // been invoked on an object, it may skip such objects in some cases.  This is
 452   // most likely to happen in an "upwards" (ascending address) iteration of
 453   // MemRegions.
 454   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 455 
 456   // Requires that "mr" be entirely within the space.
 457   // Apply "cl->do_object" to all objects that intersect with "mr".
 458   // If the iteration encounters an unparseable portion of the region,
 459   // terminate the iteration and return the address of the start of the
 460   // subregion that isn't done.  Return of "NULL" indicates that the
 461   // iteration completed.
 462   HeapWord* object_iterate_careful_m(MemRegion mr,
 463                                      ObjectClosureCareful* cl);
 464 
 465   // Override: provides a DCTO_CL specific to this kind of space.
 466   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
 467                                      CardTable::PrecisionStyle precision,
 468                                      HeapWord* boundary,
 469                                      bool parallel);
 470 
 471   void blk_iterate(BlkClosure* cl);
 472   void blk_iterate_careful(BlkClosureCareful* cl);
 473   HeapWord* block_start_const(const void* p) const;
 474   HeapWord* block_start_careful(const void* p) const;
 475   size_t block_size(const HeapWord* p) const;
 476   size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
 477   bool block_is_obj(const HeapWord* p) const;
 478   bool obj_is_alive(const HeapWord* p) const;
 479   size_t block_size_nopar(const HeapWord* p) const;
 480   bool block_is_obj_nopar(const HeapWord* p) const;
 481 
 482   // Iteration support for promotion
 483   void save_marks();
 484   bool no_allocs_since_save_marks();
 485 
 486   // Iteration support for sweeping




 416   // the space.
 417   MemRegion used_region() const {
 418     return MemRegion(bottom(),
 419                      BlockOffsetArrayUseUnallocatedBlock ?
 420                      unallocated_block() : end());
 421   }
 422 
 423   virtual bool is_free_block(const HeapWord* p) const;
 424 
 425   // Resizing support
 426   void set_end(HeapWord* value);  // override
 427 
 428   // Never mangle CompactibleFreeListSpace
 429   void mangle_unused_area() {}
 430   void mangle_unused_area_complete() {}
 431 
 432   // Mutual exclusion support
 433   Mutex* freelistLock() const { return &_freelistLock; }
 434 
 435   // Iteration support
 436   void oop_iterate(OopIterateClosure* cl);
 437 
 438   void object_iterate(ObjectClosure* blk);
 439   // Apply the closure to each object in the space whose references
 440   // point to objects in the heap.  The usage of CompactibleFreeListSpace
 441   // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
 442   // objects in the space to hold references to objects that are no
 443   // longer valid.  For example, an object may reference another object
 444   // that has already been swept up (collected).  This method uses
 445   // obj_is_alive() to determine whether it is safe to iterate over
 446   // an object.
 447   void safe_object_iterate(ObjectClosure* blk);
 448 
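As context for the comment above, here is a minimal sketch (not part of this webrev) of a closure that could be handed to safe_object_iterate(). ObjectClosure and its do_object() hook come from memory/iterator.hpp; the counting behavior and the name CountLiveClosure are illustrative assumptions only.

    #include "memory/iterator.hpp"

    // Hypothetical closure: tallies the objects the space considers
    // alive.  safe_object_iterate() consults obj_is_alive() before
    // applying do_object(), so no liveness check is needed here.
    class CountLiveClosure : public ObjectClosure {
      size_t _count;
     public:
      CountLiveClosure() : _count(0) {}
      virtual void do_object(oop obj) { _count++; }
      size_t count() const { return _count; }
    };

A caller would then do something like: CountLiveClosure cl; space->safe_object_iterate(&cl); where space is a CompactibleFreeListSpace*.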
 449   // Iterate over all objects that intersect with mr, calling "cl->do_object"
 450   // on each.  There is an exception to this: if this closure has already
 451   // been invoked on an object, it may skip such objects in some cases.  This is
 452   // most likely to happen in an "upwards" (ascending address) iteration of
 453   // MemRegions.
 454   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 455 
 456   // Requires that "mr" be entirely within the space.
 457   // Apply "cl->do_object" to all objects that intersect with "mr".
 458   // If the iteration encounters an unparseable portion of the region,
 459   // terminate the iteration and return the address of the start of the
 460   // subregion that isn't done.  Return of "NULL" indicates that the
 461   // iteration completed.
 462   HeapWord* object_iterate_careful_m(MemRegion mr,
 463                                      ObjectClosureCareful* cl);
 464 
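The return contract described above (address of the unfinished subregion, or NULL when done) suggests the following caller pattern; this is a hedged sketch, with space, mr, and cl assumed to be a CompactibleFreeListSpace*, a MemRegion, and an ObjectClosureCareful* respectively.

    // Apply cl to objects intersecting mr; stop at the first
    // unparseable block, if any.
    HeapWord* stop = space->object_iterate_careful_m(mr, cl);
    if (stop != NULL) {
      // [stop, mr.end()) was not iterated; defer it to a later,
      // safer pass (e.g. after the unparseable block is fixed up).
      MemRegion unfinished(stop, mr.end());
      // ... record 'unfinished' for retry ...
    }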
 465   // Override: provides a DCTO_CL specific to this kind of space.
 466   DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
 467                                      CardTable::PrecisionStyle precision,
 468                                      HeapWord* boundary,
 469                                      bool parallel);
 470 
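For orientation, a hypothetical use of this factory (not taken from the webrev): ObjHeadPreciseArray is one of the CardTable::PrecisionStyle values, and do_MemRegion() is the generic DirtyCardToOopClosure entry point; cl, boundary, and dirty_mr are assumed to exist in the caller.

    // Build the space-specific dirty-card closure and apply it to one
    // dirty region found in the card table.
    DirtyCardToOopClosure* dcto_cl =
        space->new_dcto_cl(cl, CardTable::ObjHeadPreciseArray,
                           boundary, /* parallel */ false);
    dcto_cl->do_MemRegion(dirty_mr);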
 471   void blk_iterate(BlkClosure* cl);
 472   void blk_iterate_careful(BlkClosureCareful* cl);
 473   HeapWord* block_start_const(const void* p) const;
 474   HeapWord* block_start_careful(const void* p) const;
 475   size_t block_size(const HeapWord* p) const;
 476   size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
 477   bool block_is_obj(const HeapWord* p) const;
 478   bool obj_is_alive(const HeapWord* p) const;
 479   size_t block_size_nopar(const HeapWord* p) const;
 480   bool block_is_obj_nopar(const HeapWord* p) const;
 481 
 482   // Iteration support for promotion
 483   void save_marks();
 484   bool no_allocs_since_save_marks();
 485 
 486   // Iteration support for sweeping

