src/share/vm/memory/space.hpp

rev 7084 : [mq]: demacro


  29 #include "memory/blockOffsetTable.hpp"
  30 #include "memory/cardTableModRefBS.hpp"
  31 #include "memory/iterator.hpp"
  32 #include "memory/memRegion.hpp"
  33 #include "memory/watermark.hpp"
  34 #include "oops/markOop.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "utilities/macros.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 // A space is an abstraction for the "storage units" backing
  40 // up the generation abstraction. It includes specific
  41 // implementations for keeping track of free and used space,
  42 // for iterating over objects and free blocks, etc.
  43 
  44 // Here's the Space hierarchy:
  45 //
  46 // - Space               -- an abstract base class describing a heap area
  47 //   - CompactibleSpace  -- a space supporting compaction
  48 //     - CompactibleFreeListSpace -- (used for CMS generation)
  49 //     - ContiguousSpace -- a compactible space in which all free space
  50 //                          is contiguous
  51 //       - EdenSpace     -- contiguous space used as nursery
  52 //         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
  53 //       - OffsetTableContigSpace -- contiguous space with a block offset array
  54 //                          that allows "fast" block_start calls
  55 //         - TenuredSpace -- (used for TenuredGeneration)
  56 
  57 // Forward decls.
  58 class Space;
  59 class BlockOffsetArray;
  60 class BlockOffsetArrayContigSpace;
  61 class Generation;
  62 class CompactibleSpace;
  63 class BlockOffsetTable;
  64 class GenRemSet;
  65 class CardTableRS;
  66 class DirtyCardToOopClosure;
  67 
  68 // A Space describes a heap area. Class Space is an abstract


 374   // space into which to compact.
 375 
 376   virtual CompactibleSpace* next_compaction_space() const {
 377     return _next_compaction_space;
 378   }
 379 
 380   void set_next_compaction_space(CompactibleSpace* csp) {
 381     _next_compaction_space = csp;
 382   }
 383 
 384   // MarkSweep support phase2
 385 
 386   // Start the process of compaction of the current space: compute
 387   // post-compaction addresses, and insert forwarding pointers.  The fields
 388   // "cp->gen" and "cp->compaction_space" are the generation and space into
 389   // which we are currently compacting.  This call updates "cp" as necessary,
 390   // and leaves the "compaction_top" of the final value of
 391   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
 392   // this phase as if the final copy had occurred; if so, "cp->threshold"
 393   // indicates when the next such action should be taken.
 394   virtual void prepare_for_compaction(CompactPoint* cp);
 395   // MarkSweep support phase3
 396   virtual void adjust_pointers();
 397   // MarkSweep support phase4
 398   virtual void compact();
 399 
 400   // The maximum percentage of objects that can be dead in the live,
 401   // compacted part of a compacted space ("deadwood" support).
 402   virtual size_t allowed_dead_ratio() const { return 0; }
 403 
 404   // Some contiguous spaces may maintain some data structures that should
 405   // be updated whenever an allocation crosses a boundary.  This function
 406   // returns the first such boundary.
 407   // (The default implementation returns the end of the space, so the
 408   // boundary is never crossed.)
 409   virtual HeapWord* initialize_threshold() { return end(); }
 410 
 411   // "q" is an object of the given "size" that should be forwarded;
 412   // "cp" names the generation ("gen") and the compaction space ("space",
 413   // which must equal "this").  "compact_top" is where in "this" the
 414   // next object should be forwarded to.  If there is room in "this" for
 415   // the object, insert an appropriate forwarding pointer in "q".
 416   // If not, go to the next compaction space (there must
 417   // be one, since compaction must succeed -- we go to the first space of
 418   // the previous generation if necessary, updating "cp"), reset compact_top
 419   // and then forward.  In either case, returns the new value of "compact_top".
 420   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 421   // function of the then-current compaction space, and updates "cp->threshold"
 422   // accordingly.
 423   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 424                     HeapWord* compact_top);
 425 
 426   // Return a size with adjustments as required of the space.
 427   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 428 
 429 protected:
 430   // Used during compaction.
 431   HeapWord* _first_dead;
 432   HeapWord* _end_of_live;
 433 
 434   // Minimum size of a free block.
 435   virtual size_t minimum_free_block_size() const { return 0; }
 436 
 437   // This function is invoked when an allocation of an object covering
 438   // "start" to "end" crosses the threshold; returns the next
 439   // threshold.  (The default implementation does nothing.)
 440   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 441     return end();
 442   }
 443 
 444   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
 445   // free block of the given "word_len", and that "q", were it an object,
 446   // would not move if forwarded.  If the size allows, fill the free
 447   // block with an object, to prevent excessive compaction.  Returns "true"
 448   // iff the free region was made deadspace, and modifies
 449   // "allowed_deadspace_words" to reflect the number of available deadspace
 450   // words remaining after this operation.
 451   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
 452                         size_t word_len);
 453 };
 454 
 455 class GenSpaceMangler;
 456 
 457 // A space in which the free area is contiguous.  It therefore supports
 458 // faster allocation, and compaction.
 459 class ContiguousSpace: public CompactibleSpace {
 460   friend class OneContigSpaceCardGeneration;
 461   friend class VMStructs;
 462  protected:
 463   HeapWord* _top;
 464   HeapWord* _concurrent_iteration_safe_limit;
 465   // A helper for mangling the unused area of the space in debug builds.
 466   GenSpaceMangler* _mangler;
 467 
 468   GenSpaceMangler* mangler() { return _mangler; }
 469 
 470   // Allocation helpers (return NULL if full).
 471   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
 472   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);


 607   HeapWord** end_addr() { return &_end; }
 608 
 609   // Overrides for more efficient compaction support.
 610   void prepare_for_compaction(CompactPoint* cp);
 611 
 612   // PrintHeapAtGC support.
 613   virtual void print_on(outputStream* st) const;
 614 
 615   // Checked dynamic downcasts.
 616   virtual ContiguousSpace* toContiguousSpace() {
 617     return this;
 618   }
 619 
 620   // Debugging
 621   virtual void verify() const;
 622 
 623   // Used to increase collection frequency.  A "factor" of 0 means the
 624   // entire space.
 625   void allocate_temporary_filler(int factor);
 626 
 627 };
 628 
 629 
 630 // A dirty card to oop closure that does filtering.
 631 // It knows how to filter out objects that are outside of the _boundary.
 632 class Filtering_DCTOC : public DirtyCardToOopClosure {
 633 protected:
 634   // Override.
 635   void walk_mem_region(MemRegion mr,
 636                        HeapWord* bottom, HeapWord* top);
 637 
 638   // Walk the given memory region, from bottom to top, applying
 639   // the given oop closure to (possibly) all objects found. The
 640   // given oop closure may or may not be the same as the oop
 641   // closure with which this closure was created, as it may
 642   // be a filtering closure which makes use of the _boundary.
 643   // We offer two signatures, so the FilteringClosure static type is
 644   // apparent.
 645   virtual void walk_mem_region_with_cl(MemRegion mr,
 646                                        HeapWord* bottom, HeapWord* top,




  29 #include "memory/blockOffsetTable.hpp"
  30 #include "memory/cardTableModRefBS.hpp"
  31 #include "memory/iterator.hpp"
  32 #include "memory/memRegion.hpp"
  33 #include "memory/watermark.hpp"
  34 #include "oops/markOop.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "utilities/macros.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 // A space is an abstraction for the "storage units" backing
  40 // up the generation abstraction. It includes specific
  41 // implementations for keeping track of free and used space,
  42 // for iterating over objects and free blocks, etc.
  43 
  44 // Here's the Space hierarchy:
  45 //
  46 // - Space               -- an abstract base class describing a heap area
  47 //   - CompactibleSpace  -- a space supporting compaction
  48 //     - CompactibleFreeListSpace -- (used for CMS generation)
  49 //     - G1OffsetTableContigSpace -- G1 version of OffsetTableContigSpace
  50 //     - ContiguousSpace -- a compactible space in which all free space
  51 //                          is contiguous
  52 //       - EdenSpace     -- contiguous space used as nursery
  53 //         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
  54 //       - OffsetTableContigSpace -- contiguous space with a block offset array
  55 //                          that allows "fast" block_start calls
  56 //         - TenuredSpace -- (used for TenuredGeneration)
  57 
  58 // Forward decls.
  59 class Space;
  60 class BlockOffsetArray;
  61 class BlockOffsetArrayContigSpace;
  62 class Generation;
  63 class CompactibleSpace;
  64 class BlockOffsetTable;
  65 class GenRemSet;
  66 class CardTableRS;
  67 class DirtyCardToOopClosure;
  68 
  69 // A Space describes a heap area. Class Space is an abstract


 375   // space into which to compact.
 376 
 377   virtual CompactibleSpace* next_compaction_space() const {
 378     return _next_compaction_space;
 379   }
 380 
 381   void set_next_compaction_space(CompactibleSpace* csp) {
 382     _next_compaction_space = csp;
 383   }
 384 
 385   // MarkSweep support phase2
 386 
 387   // Start the process of compaction of the current space: compute
 388   // post-compaction addresses, and insert forwarding pointers.  The fields
 389   // "cp->gen" and "cp->compaction_space" are the generation and space into
 390   // which we are currently compacting.  This call updates "cp" as necessary,
 391   // and leaves the "compaction_top" of the final value of
 392   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
 393   // this phase as if the final copy had occurred; if so, "cp->threshold"
 394   // indicates when the next such action should be taken.
 395   virtual void prepare_for_compaction(CompactPoint* cp) = 0;
 396   // MarkSweep support phase3
 397   virtual void adjust_pointers();
 398   // MarkSweep support phase4
 399   virtual void compact();
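
Taken together, the phase2/phase3/phase4 hooks implement classic sliding
("Lisp-2" style) compaction. As a rough standalone illustration -- toy
types, with vector indices standing in for HeapWord* addresses; this is
not HotSpot code -- the three phases fit together like this:

#include <vector>

// Toy heap: objects are slots in a vector, "addresses" are indices.
struct ToyObj {
  bool live;
  int  ref;       // index of one referenced (live) object, or -1
  int  forwardee; // new index, computed in phase 2
};

// Phase 2 (prepare_for_compaction): compute post-compaction addresses by
// sliding live objects toward index 0, recording each new address as a
// forwarding pointer in the object itself.
void phase2_forward(std::vector<ToyObj>& heap) {
  int compact_top = 0;
  for (ToyObj& o : heap)
    if (o.live) o.forwardee = compact_top++;
}

// Phase 3 (adjust_pointers): rewrite every reference through the
// forwarding pointer of the object it points at.
void phase3_adjust(std::vector<ToyObj>& heap) {
  for (ToyObj& o : heap)
    if (o.live && o.ref >= 0) o.ref = heap[o.ref].forwardee;
}

// Phase 4 (compact): move each live object to its forwarding address.
// Scanning low to high never clobbers a not-yet-moved live object,
// because an object's destination is never above its source.
void phase4_compact(std::vector<ToyObj>& heap) {
  for (size_t i = 0; i < heap.size(); i++)
    if (heap[i].live) heap[heap[i].forwardee] = heap[i];
}
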
 400 
 401   // The maximum percentage of objects that can be dead in the live,
 402   // compacted part of a compacted space ("deadwood" support).
 403   virtual size_t allowed_dead_ratio() const { return 0; }
 404 
 405   // Some contiguous spaces may maintain some data structures that should
 406   // be updated whenever an allocation crosses a boundary.  This function
 407   // returns the first such boundary.
 408   // (The default implementation returns the end of the space, so the
 409   // boundary is never crossed.)
 410   virtual HeapWord* initialize_threshold() { return end(); }
 411 
 412   // "q" is an object of the given "size" that should be forwarded;
 413   // "cp" names the generation ("gen") and the compaction space ("space",
 414   // which must equal "this").  "compact_top" is where in "this" the
 415   // next object should be forwarded to.  If there is room in "this" for
 416   // the object, insert an appropriate forwarding pointer in "q".
 417   // If not, go to the next compaction space (there must
 418   // be one, since compaction must succeed -- we go to the first space of
 419   // the previous generation if necessary, updating "cp"), reset compact_top
 420   // and then forward.  In either case, returns the new value of "compact_top".
 421   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 422   // function of the then-current compaction space, and updates "cp->threshold"
 423   // accordingly.
 424   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 425                     HeapWord* compact_top);
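
The control flow described above, as a hedged sketch with stand-in types
(ToySpace and ToyCompactPoint are illustrative; HotSpot's CompactPoint
also carries the generation, and the forwarding pointer goes into the
object's mark word):

#include <cstddef>

struct ToySpace {
  char*     bottom;
  char*     end;
  char*     compact_top;
  ToySpace* next_compaction_space;
};

struct ToyCompactPoint {
  ToySpace* space;
  char*     threshold;
};

// Returns the new compact_top for an object of "size" bytes; the store
// of the forwarding pointer into the object's header is elided.
char* toy_forward(ToyCompactPoint* cp, size_t size) {
  while (cp->space->compact_top + size > cp->space->end) {
    // No room: advance to the next compaction space (one must exist,
    // since compaction must succeed) and reset its compact_top.
    cp->space = cp->space->next_compaction_space;
    cp->space->compact_top = cp->space->bottom;
    cp->threshold = cp->space->end;   // default initialize_threshold()
  }
  char* forwarding_addr = cp->space->compact_top;
  char* new_top = forwarding_addr + size;
  if (new_top > cp->threshold) {
    // Crossed the boundary: a real space would update its side tables
    // and hand back the next boundary (cf. cross_threshold() below).
    cp->threshold = cp->space->end;
  }
  // ... install forwarding_addr in the object at this point ...
  cp->space->compact_top = new_top;
  return new_top;
}
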
 426 
 427   // Return a size with adjustments as required of the space.
 428   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 429 
 430   // Functions for scan_and_{forward,adjust_pointers,compact} support.
 431   inline bool scanned_block_is_obj(const HeapWord* addr) const {
 432     // Perform virtual call. This is currently not a problem since this
 433     // function is only used in an assert (from scan_and_adjust_pointers).
 434     return block_is_obj(addr);
 435   }
 436 
 437   inline size_t adjust_obj_size(size_t size) const {
 438     return size;
 439   }
 440 
 441   inline size_t obj_size(const HeapWord* addr) const {
 442     return oop(addr)->size();
 443   }
 444 
 445 protected:
 446   // Used during compaction.
 447   HeapWord* _first_dead;
 448   HeapWord* _end_of_live;
 449 
 450   // Minimum size of a free block.
 451   virtual size_t minimum_free_block_size() const { return 0; }
 452 
 453   // This function is invoked when an allocation of an object covering
 454   // "start" to "end" crosses the threshold; returns the next
 455   // threshold.  (The default implementation does nothing.)
 456   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 457     return end();
 458   }
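
initialize_threshold() and cross_threshold() form a small protocol: a
space backed by a block offset table hands out the next boundary it
cares about, and the compaction code calls back whenever forwarding
crosses it. A minimal sketch, assuming 512-byte cards and toy names
(the real work goes through the BlockOffsetArray, elided here):

#include <cstdint>

const uintptr_t ToyCardSize = 512; // assumed card size in bytes

struct ToyOffsetTableSpace {
  char* _bottom;
  char* _end;

  // The first boundary an allocation can cross.
  char* initialize_threshold() {
    return _bottom + ToyCardSize;
  }

  // Called when [start, the_end) crosses the current threshold: record
  // where the crossing block starts (offset table update elided), then
  // return the next card boundary at or beyond the_end.
  char* cross_threshold(char* start, char* the_end) {
    (void)start; // a real implementation updates the offset table here
    uintptr_t e = (uintptr_t)the_end;
    uintptr_t next = (e + ToyCardSize - 1) / ToyCardSize * ToyCardSize;
    return (char*)next;
  }
};
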
 459 
 460   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
 461   // free block of the given "word_len", and that "q", were it an object,
 462   // would not move if forwarded.  If the size allows, fill the free
 463   // block with an object, to prevent excessive compaction.  Returns "true"
 464   // iff the free region was made deadspace, and modifies
 465   // "allowed_deadspace_words" to reflect the number of available deadspace
 466   // words remaining after this operation.
 467   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
 468                         size_t word_len);
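
The deadwood idea behind insert_deadspace(): a sufficiently small free
block is cheaper to keep in place, disguised as a dummy object, than to
compact over. A standalone sketch with assumed names (toy_insert_deadspace
and MinFillerWords are illustrative; HotSpot formats a real filler object
at "q"):

#include <cstddef>

const size_t MinFillerWords = 2; // assumed minimum fillable size, in words

bool toy_insert_deadspace(size_t& allowed_deadspace_words,
                          void* q, size_t word_len) {
  if (word_len >= MinFillerWords && allowed_deadspace_words >= word_len) {
    allowed_deadspace_words -= word_len;
    (void)q; // a real implementation writes a filler object header at q
    return true;  // block becomes deadspace and is not moved
  }
  return false;   // over budget (or too small to fill): compact it away
}
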
 469 
 470   // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
 471   // The space argument should be a subclass of CompactibleSpace, implementing
 472   // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
 473   // and possibly also overriding obj_size(), and adjust_obj_size().
 474   // and possibly also overriding obj_size() and adjust_obj_size().
 475 
 476   // Frequently calls adjust_obj_size(). (Asserts on scanned_block_is_obj().)
 477   template <class SpaceType>
 478   static inline void scan_and_adjust_pointers(SpaceType* space);
 479 
 480   // Frequently calls obj_size().
 481   template <class SpaceType>
 482   static inline void scan_and_compact(SpaceType* space);
 483 
 484   // Frequently calls scanned_block_is_obj() and scanned_block_size().
 485   // Requires the scan_limit() function.
 486   template <class SpaceType>
 487   static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
 488 };
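
These templates are the point of the "demacro" change: the traversal
loops are written once, and each concrete space supplies non-virtual
hooks that the compiler resolves statically and can inline. A hedged
sketch of the pattern (toy types and signatures, not HotSpot's):

#include <cstddef>

template <class SpaceType>
void toy_scan_and_forward(SpaceType* space) {
  char* cur = space->bottom();
  // scan_limit(), scanned_block_is_obj() and scanned_block_size()
  // resolve against SpaceType at compile time: no virtual calls in
  // the hot loop.
  while (cur < space->scan_limit()) {
    if (space->scanned_block_is_obj(cur)) {
      // ... forward the object at cur (cf. forward() above) ...
    }
    cur += space->scanned_block_size(cur);
  }
}

// Hooks a contiguous space might supply (cf. ContiguousSpace below):
// everything up to top() is an object.
struct ToyContigSpace {
  char* _bottom;
  char* _top;
  char*  bottom() const     { return _bottom; }
  char*  scan_limit() const { return _top; }
  bool   scanned_block_is_obj(const char*) const { return true; }
  size_t scanned_block_size(const char* p) const {
    return *(const size_t*)p; // toy layout: first word holds byte size
  }
};

Instantiating toy_scan_and_forward<ToyContigSpace> gives each space its
own specialized loop, which is what the old macro-generated bodies
achieved, but with ordinary template machinery.
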
 489 
 490 class GenSpaceMangler;
 491 
 492 // A space in which the free area is contiguous.  It therefore supports
 493 // faster allocation, and compaction.
 494 class ContiguousSpace: public CompactibleSpace {
 495   friend class OneContigSpaceCardGeneration;
 496   friend class VMStructs;
 497  protected:
 498   HeapWord* _top;
 499   HeapWord* _concurrent_iteration_safe_limit;
 500   // A helper for mangling the unused area of the space in debug builds.
 501   GenSpaceMangler* _mangler;
 502 
 503   GenSpaceMangler* mangler() { return _mangler; }
 504 
 505   // Allocation helpers (return NULL if full).
 506   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
 507   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
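
Both helpers are bump-pointer allocators over [top, end); the parallel
variant must retry with a CAS because other threads advance top
concurrently. A simplified sketch (toy types; the real helpers take an
explicit end_value so Eden variants can pass a "soft" end, which is
folded into a member here):

#include <atomic>
#include <cstddef>

struct ToyAlloc {
  std::atomic<char*> _top;
  char*              _end;

  // Serial path: plain load and store, caller holds the lock.
  char* allocate_impl(size_t byte_size) {
    char* obj = _top.load(std::memory_order_relaxed);
    if (obj + byte_size > _end) return nullptr; // NULL if full
    _top.store(obj + byte_size, std::memory_order_relaxed);
    return obj;
  }

  // Parallel path: lock-free bump with compare-and-swap.
  char* par_allocate_impl(size_t byte_size) {
    char* obj = _top.load(std::memory_order_relaxed);
    for (;;) {
      if (obj + byte_size > _end) return nullptr; // NULL if full
      // On failure, obj is refreshed with the current top; retry.
      if (_top.compare_exchange_weak(obj, obj + byte_size))
        return obj;
    }
  }
};
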


 642   HeapWord** end_addr() { return &_end; }
 643 
 644   // Overrides for more efficient compaction support.
 645   void prepare_for_compaction(CompactPoint* cp);
 646 
 647   // PrintHeapAtGC support.
 648   virtual void print_on(outputStream* st) const;
 649 
 650   // Checked dynamic downcasts.
 651   virtual ContiguousSpace* toContiguousSpace() {
 652     return this;
 653   }
 654 
 655   // Debugging
 656   virtual void verify() const;
 657 
 658   // Used to increase collection frequency.  A "factor" of 0 means the
 659   // entire space.
 660   void allocate_temporary_filler(int factor);
 661 
 662   // Functions for scan_and_{forward,adjust_pointers,compact} support.
 663   inline HeapWord* scan_limit() const {
 664     return top();
 665   }
 666 
 667   inline bool scanned_block_is_obj(const HeapWord* addr) const {
 668     return true; // Always true, since scan_limit is top
 669   }
 670 
 671   inline size_t scanned_block_size(const HeapWord* addr) const {
 672     return oop(addr)->size();
 673   }
 674 };
 675 
 676 
 677 // A dirty card to oop closure that does filtering.
 678 // It knows how to filter out objects that are outside of the _boundary.
 679 class Filtering_DCTOC : public DirtyCardToOopClosure {
 680 protected:
 681   // Override.
 682   void walk_mem_region(MemRegion mr,
 683                        HeapWord* bottom, HeapWord* top);
 684 
 685   // Walk the given memory region, from bottom to top, applying
 686   // the given oop closure to (possibly) all objects found. The
 687   // given oop closure may or may not be the same as the oop
 688   // closure with which this closure was created, as it may
 689   // be a filtering closure which makes use of the _boundary.
 690   // We offer two signatures, so the FilteringClosure static type is
 691   // apparent.
 692   virtual void walk_mem_region_with_cl(MemRegion mr,
 693                                        HeapWord* bottom, HeapWord* top,
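
The "two signatures" remark describes a common devirtualization idiom:
overloading on the concrete closure type lets calls made through that
static type bind, and potentially inline, without a dynamic check. A
minimal sketch with assumed, simplified closure types (not the exact
HotSpot declarations):

class ToyOopClosure {
 public:
  virtual void do_oop(void** p) = 0;
};

class ToyFilteringClosure : public ToyOopClosure {
 public:
  // Filters out pointers outside a boundary before applying an action.
  virtual void do_oop(void** p) { (void)p; /* filter, then apply */ }
};

class ToyDCTOC {
 protected:
  // One overload per static type: a caller holding a
  // ToyFilteringClosure* selects the second overload at compile time,
  // so the closure's concrete type is apparent to the implementation.
  virtual void walk_mem_region_with_cl(char* bottom, char* top,
                                       ToyOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(char* bottom, char* top,
                                       ToyFilteringClosure* cl) = 0;
};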