src/hotspot/share/gc/shared/space.hpp

 203   // Requires "addr" to be the start of a chunk, and returns its size.
 204   // "addr + size" is required to be the start of a new chunk, or the end
 205   // of the active area of the heap.
 206   virtual size_t block_size(const HeapWord* addr) const = 0;
 207 
 208   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 209   // the block is an object.
 210   virtual bool block_is_obj(const HeapWord* addr) const = 0;
 211 
 212   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 213   // the block is an object and the object is alive.
 214   virtual bool obj_is_alive(const HeapWord* addr) const;
 215 
 216   // Allocation (return NULL if full).  Assumes the caller has established
 217   // mutually exclusive access to the space.
 218   virtual HeapWord* allocate(size_t word_size) = 0;
 219 
 220   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 221   virtual HeapWord* par_allocate(size_t word_size) = 0;
 222 

 223   // Mark-sweep-compact support: all spaces can update pointers to objects
 224   // moving as a part of compaction.
 225   virtual void adjust_pointers() = 0;

 226 
 227   virtual void print() const;
 228   virtual void print_on(outputStream* st) const;
 229   virtual void print_short() const;
 230   virtual void print_short_on(outputStream* st) const;
 231 
 232 
 233   // Accessor for parallel sequential tasks.
 234   SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
 235 
 236   // IF "this" is a ContiguousSpace, return it, else return NULL.
 237   virtual ContiguousSpace* toContiguousSpace() {
 238     return NULL;
 239   }
 240 
 241   // Debugging
 242   virtual void verify() const = 0;
 243 };
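
The par_allocate() contract above ("enforces mutual exclusion internally") is usually met with a lock-free bump-pointer loop rather than a lock. Below is a minimal sketch of that approach, assuming the _top/_end layout exposed by ContiguousSpace (top(), end(), top_addr()) and the JDK 11-era Atomic::cmpxchg(new_value, dest, compare_value) argument order; it is an illustration, not the implementation in this file.

// Sketch only: CAS-based parallel allocation for a bump-pointer space.
// Assumes runtime/atomic.hpp and utilities/globalDefinitions.hpp are in scope.
inline HeapWord* example_par_allocate(ContiguousSpace* sp, size_t size) {
  do {
    HeapWord* obj = sp->top();
    if (pointer_delta(sp->end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      // Publish the new top; if another thread raced us and won, retry.
      HeapWord* result = Atomic::cmpxchg(new_top, sp->top_addr(), obj);
      if (result == obj) {
        return obj;        // this thread claimed [obj, obj + size)
      }
    } else {
      return NULL;         // space is full
    }
  } while (true);
}

A caller that receives NULL would normally fall back to a slower path, for example expanding the space or triggering a collection.
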
 244 
 245 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an


 388       "should point inside space");
 389     _compaction_top = value;
 390   }
 391 
 392   // Perform operations on the space needed after a compaction
 393   // has been performed.
 394   virtual void reset_after_compaction() = 0;
 395 
 396   // Returns the next space (in the current generation) to be compacted in
 397   // the global compaction order.  Also is used to select the next
 398   // space into which to compact.
 399 
 400   virtual CompactibleSpace* next_compaction_space() const {
 401     return _next_compaction_space;
 402   }
 403 
 404   void set_next_compaction_space(CompactibleSpace* csp) {
 405     _next_compaction_space = csp;
 406   }
 407 

 408   // MarkSweep support phase2
 409 
 410   // Start the process of compaction of the current space: compute
 411   // post-compaction addresses, and insert forwarding pointers.  The fields
 412   // "cp->gen" and "cp->compaction_space" are the generation and space into
 413   // which we are currently compacting.  This call updates "cp" as necessary,
 415   // and leaves the "compaction_top" of the final
 416   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
 416   // this phase as if the final copy had occurred; if so, "cp->threshold"
 417   // indicates when the next such action should be taken.
 418   virtual void prepare_for_compaction(CompactPoint* cp) = 0;
 419   // MarkSweep support phase3
 420   virtual void adjust_pointers();
 421   // MarkSweep support phase4
 422   virtual void compact();

 423 
 424   // The maximum percentage of objects that can be dead in the compacted
 425   // live part of a compacted space ("deadwood" support.)
 426   virtual size_t allowed_dead_ratio() const { return 0; }
 427 
 428   // Some contiguous spaces may maintain some data structures that should
 429   // be updated whenever an allocation crosses a boundary.  This function
 430   // returns the first such boundary.
 431   // (The default implementation returns the end of the space, so the
 432   // boundary is never crossed.)
 433   virtual HeapWord* initialize_threshold() { return end(); }
 434 
 435   // "q" is an object of the given "size" that should be forwarded;
 436   // "cp" names the generation ("gen") and containing "this" (which must
 437   // also equal "cp->space").  "compact_top" is where in "this" the
 438   // next object should be forwarded to.  If there is room in "this" for
 439   // the object, insert an appropriate forwarding pointer in "q".
 440   // If not, go to the next compaction space (there must
 441   // be one, since compaction must succeed -- we go to the first space of
 442   // the previous generation if necessary, updating "cp"), reset compact_top


 457   // Used during compaction.
 458   HeapWord* _first_dead;
 459   HeapWord* _end_of_live;
 460 
 461   // Minimum size of a free block.
 462   virtual size_t minimum_free_block_size() const { return 0; }
 463 
 464   // This function is invoked when an allocation of an object covering
 465   // "start" to "end" crosses the threshold; it returns the next
 466   // threshold.  (The default implementation does nothing.)
 467   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 468     return end();
 469   }
 470 
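
The initialize_threshold()/cross_threshold() pair above exists so that a space backed by a block offset table can keep that table in step with allocation. The following sketch shows the shape of such an override, loosely modeled on OffsetTableContigSpace; the _offsets member, the constructor, and everything else about the class are elided or assumed.

// Sketch: a contiguous space that maintains a block offset table.
class ExampleOffsetTableSpace : public ContiguousSpace {
  BlockOffsetArrayContigSpace _offsets;   // assumed member; ctor elided
 public:
  // The first boundary is wherever the offset table wants its next update.
  virtual HeapWord* initialize_threshold() {
    return _offsets.initialize_threshold();
  }
  // An allocation covering [start, end) crossed the boundary: record the
  // block in the table and return the next boundary to watch for.
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end) {
    _offsets.alloc_block(start, end);
    return _offsets.threshold();
  }
};
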
 471   // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
 472   // The space argument should be a subclass of CompactibleSpace, implementing
 473   // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
 474   // and possibly also overriding obj_size(), and adjust_obj_size().
 475   // These functions should avoid virtual calls whenever possible.
 476 

 477   // Frequently calls adjust_obj_size().
 478   template <class SpaceType>
 479   static inline void scan_and_adjust_pointers(SpaceType* space);

 480 
 481   // Frequently calls obj_size().
 482   template <class SpaceType>
 483   static inline void scan_and_compact(SpaceType* space);
 484 
 485   // Frequently calls scanned_block_is_obj() and scanned_block_size().
 486   // Requires the scan_limit() function.
 487   template <class SpaceType>
 488   static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
 489 };
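
The scan_and_* templates declared above are instantiated on a concrete space type so that the per-block hooks resolve to non-virtual calls. As a rough illustration of what those hooks look like for a bump-pointer space, here is a hypothetical subclass (ExampleSpace and its _top member are stand-ins, not types from this file):

// Sketch: hooks consumed (non-virtually) by scan_and_forward(),
// scan_and_adjust_pointers() and scan_and_compact().
class ExampleSpace : public CompactibleSpace {
  HeapWord* _top;   // current allocation high-water mark (hypothetical)
 public:
  HeapWord* top() const { return _top; }
  // The templates walk blocks in [bottom(), scan_limit()).
  HeapWord* scan_limit() const { return top(); }
  // Below top(), every block in a bump-pointer space is an object.
  bool scanned_block_is_obj(const HeapWord* addr) const { return true; }
  // A block's size is simply its object's size.
  size_t scanned_block_size(const HeapWord* addr) const {
    return oop(addr)->size();
  }
  // obj_size()/adjust_obj_size() only need overriding for spaces that keep
  // extra per-block metadata (see the comment above).
};
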
 490 
 491 class GenSpaceMangler;
 492 
 493 // A space in which the free area is contiguous.  It therefore supports
 494 // faster allocation, and compaction.
 495 class ContiguousSpace: public CompactibleSpace {
 496   friend class VMStructs;
 497   // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
 498   template <typename SpaceType>
 499   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);


 586   // calling "cl.do_object_careful" on each. Return NULL if all objects
 587   // in the space (at the start of the iteration) were iterated over.
 588   // Return an address indicating the extent of the iteration in the
 589   // event that the iteration had to return because of finding an
 590   // uninitialized object in the space, or if the closure "cl"
 591   // signaled early termination.
 592   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 593   HeapWord* concurrent_iteration_safe_limit() {
 594     assert(_concurrent_iteration_safe_limit <= top(),
 595            "_concurrent_iteration_safe_limit update missed");
 596     return _concurrent_iteration_safe_limit;
 597   }
 598   // Changes the safe limit; all objects from bottom() to the new
 599   // limit must be properly initialized.
 600   void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
 601     assert(new_limit <= top(), "uninitialized objects in the safe range");
 602     _concurrent_iteration_safe_limit = new_limit;
 603   }
 604 
 605 
 606 #if INCLUDE_ALL_GCS
 607   // In support of parallel oop_iterate.
 608   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
 609     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 610 
 611     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
 612   #undef ContigSpace_PAR_OOP_ITERATE_DECL
 613 #endif // INCLUDE_ALL_GCS
 614 
 615   // Compaction support
 616   virtual void reset_after_compaction() {
 617     assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
 618     set_top(compaction_top());
 619     // set new iteration safe limit
 620     set_concurrent_iteration_safe_limit(compaction_top());
 621   }
 622 
 623   // Override.
 624   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
 625                                      CardTable::PrecisionStyle precision,
 626                                      HeapWord* boundary,
 627                                      bool parallel);
 628 
 629   // Apply "blk->do_oop" to the addresses of all reference fields in objects
 630   // starting with the _saved_mark_word, which was noted during a generation's
 631   // save_marks and is required to denote the head of an object.
 632   // Fields in objects allocated by applications of the closure
 633   // *are* included in the iteration.


 637   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
 638 
 639   ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
 640 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
 641 
 642   // Same as object_iterate, but starting from "mark", which is required
 643   // to denote the start of an object.  Objects allocated by
 644   // applications of the closure *are* included in the iteration.
 645   virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
 646 
 647   // Very inefficient implementation.
 648   virtual HeapWord* block_start_const(const void* p) const;
 649   size_t block_size(const HeapWord* p) const;
 650   // If a block is in the allocated area, it is an object.
 651   bool block_is_obj(const HeapWord* p) const { return p < top(); }
 652 
 653   // Addresses for inlined allocation
 654   HeapWord** top_addr() { return &_top; }
 655   HeapWord** end_addr() { return &_end; }
 656 

 657   // Overrides for more efficient compaction support.
 658   void prepare_for_compaction(CompactPoint* cp);

 659 
 660   virtual void print_on(outputStream* st) const;
 661 
 662   // Checked dynamic downcasts.
 663   virtual ContiguousSpace* toContiguousSpace() {
 664     return this;
 665   }
 666 
 667   // Debugging
 668   virtual void verify() const;
 669 
 670   // Used to increase collection frequency.  "factor" of 0 means entire
 671   // space.
 672   void allocate_temporary_filler(int factor);
 673 };
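
"Very inefficient implementation" above refers to the fact that block_start_const() in a plain contiguous space has nothing better than a linear walk from bottom(). A hedged sketch of that walk follows; example_block_start is a hypothetical helper, not the method defined for this class elsewhere in the runtime.

// Sketch: find the start of the block containing p by walking object
// headers forward from the bottom of a contiguous space; O(#objects).
HeapWord* example_block_start(const ContiguousSpace* sp, const void* p) {
  if (p >= sp->top()) {
    return sp->top();            // p lies in the free part of the space
  }
  HeapWord* cur  = sp->bottom();
  HeapWord* last = cur;
  while (cur <= (HeapWord*)p) {
    last = cur;                  // last object start at or before p
    cur += oop(cur)->size();     // step to the next object
  }
  return last;
}
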
 674 
 675 
 676 // A dirty card to oop closure that does filtering.
 677 // It knows how to filter out objects that are outside of the _boundary.
 678 class FilteringDCTOC : public DirtyCardToOopClosure {




 203   // Requires "addr" to be the start of a chunk, and returns its size.
 204   // "addr + size" is required to be the start of a new chunk, or the end
 205   // of the active area of the heap.
 206   virtual size_t block_size(const HeapWord* addr) const = 0;
 207 
 208   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 209   // the block is an object.
 210   virtual bool block_is_obj(const HeapWord* addr) const = 0;
 211 
 212   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 213   // the block is an object and the object is alive.
 214   virtual bool obj_is_alive(const HeapWord* addr) const;
 215 
 216   // Allocation (return NULL if full).  Assumes the caller has established
 217   // mutually exclusive access to the space.
 218   virtual HeapWord* allocate(size_t word_size) = 0;
 219 
 220   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 221   virtual HeapWord* par_allocate(size_t word_size) = 0;
 222 
 223 #if INCLUDE_SERIALGC
 224   // Mark-sweep-compact support: all spaces can update pointers to objects
 225   // moving as a part of compaction.
 226   virtual void adjust_pointers() = 0;
 227 #endif
 228 
 229   virtual void print() const;
 230   virtual void print_on(outputStream* st) const;
 231   virtual void print_short() const;
 232   virtual void print_short_on(outputStream* st) const;
 233 
 234 
 235   // Accessor for parallel sequential tasks.
 236   SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
 237 
 238   // IF "this" is a ContiguousSpace, return it, else return NULL.
 239   virtual ContiguousSpace* toContiguousSpace() {
 240     return NULL;
 241   }
 242 
 243   // Debugging
 244   virtual void verify() const = 0;
 245 };
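
This second listing is the patched version of the header, in which the serial-GC-only pieces (adjust_pointers() above, and the mark-sweep compaction hooks further down) are guarded by #if INCLUDE_SERIALGC. For context, the sketch below shows the usual hotspot feature-macro convention these guards rely on; the exact definitions live in utilities/macros.hpp and may differ in detail.

// Sketch of the feature-macro convention (see utilities/macros.hpp):
// the build defines INCLUDE_SERIALGC=0 when the Serial GC is excluded
// from the JVM features; otherwise it defaults to 1.
#ifndef INCLUDE_SERIALGC
#define INCLUDE_SERIALGC 1
#endif

#if INCLUDE_SERIALGC
#define SERIALGC_ONLY(x) x
#define NOT_SERIALGC(x)
#else
#define SERIALGC_ONLY(x)
#define NOT_SERIALGC(x) x
#endif
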
 246 
 247 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an


 390       "should point inside space");
 391     _compaction_top = value;
 392   }
 393 
 394   // Perform operations on the space needed after a compaction
 395   // has been performed.
 396   virtual void reset_after_compaction() = 0;
 397 
 398   // Returns the next space (in the current generation) to be compacted in
 399   // the global compaction order.  Also is used to select the next
 400   // space into which to compact.
 401 
 402   virtual CompactibleSpace* next_compaction_space() const {
 403     return _next_compaction_space;
 404   }
 405 
 406   void set_next_compaction_space(CompactibleSpace* csp) {
 407     _next_compaction_space = csp;
 408   }
 409 
 410 #if INCLUDE_SERIALGC
 411   // MarkSweep support phase2
 412 
 413   // Start the process of compaction of the current space: compute
 414   // post-compaction addresses, and insert forwarding pointers.  The fields
 415   // "cp->gen" and "cp->compaction_space" are the generation and space into
 416   // which we are currently compacting.  This call updates "cp" as necessary,
 417   // and leaves the "compaction_top" of the final
 418   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
 419   // this phase as if the final copy had occurred; if so, "cp->threshold"
 420   // indicates when the next such action should be taken.
 421   virtual void prepare_for_compaction(CompactPoint* cp) = 0;
 422   // MarkSweep support phase3
 423   virtual void adjust_pointers();
 424   // MarkSweep support phase4
 425   virtual void compact();
 426 #endif // INCLUDE_SERIALGC
 427 
 428   // The maximum percentage of objects that can be dead in the compacted
 429   // live part of a compacted space ("deadwood" support.)
 430   virtual size_t allowed_dead_ratio() const { return 0; }
 431 
 432   // Some contiguous spaces may maintain some data structures that should
 433   // be updated whenever an allocation crosses a boundary.  This function
 434   // returns the first such boundary.
 435   // (The default implementation returns the end of the space, so the
 436   // boundary is never crossed.)
 437   virtual HeapWord* initialize_threshold() { return end(); }
 438 
 439   // "q" is an object of the given "size" that should be forwarded;
 440   // "cp" names the generation ("gen") and containing "this" (which must
 441   // also equal "cp->space").  "compact_top" is where in "this" the
 442   // next object should be forwarded to.  If there is room in "this" for
 443   // the object, insert an appropriate forwarding pointer in "q".
 444   // If not, go to the next compaction space (there must
 445   // be one, since compaction must succeed -- we go to the first space of
 446   // the previous generation if necessary, updating "cp"), reset compact_top


 461   // Used during compaction.
 462   HeapWord* _first_dead;
 463   HeapWord* _end_of_live;
 464 
 465   // Minimum size of a free block.
 466   virtual size_t minimum_free_block_size() const { return 0; }
 467 
 468   // This function is invoked when an allocation of an object covering
 469   // "start" to "end" crosses the threshold; it returns the next
 470   // threshold.  (The default implementation does nothing.)
 471   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 472     return end();
 473   }
 474 
 475   // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
 476   // The space argument should be a subclass of CompactibleSpace, implementing
 477   // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
 478   // and possibly also overriding obj_size(), and adjust_obj_size().
 479   // These functions should avoid virtual calls whenever possible.
 480 
 481 #if INCLUDE_SERIALGC
 482   // Frequently calls adjust_obj_size().
 483   template <class SpaceType>
 484   static inline void scan_and_adjust_pointers(SpaceType* space);
 485 #endif
 486 
 487   // Frequently calls obj_size().
 488   template <class SpaceType>
 489   static inline void scan_and_compact(SpaceType* space);
 490 
 491   // Frequently calls scanned_block_is_obj() and scanned_block_size().
 492   // Requires the scan_limit() function.
 493   template <class SpaceType>
 494   static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
 495 };
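
Phase 2 as described above amounts to walking the blocks of the space in address order and, for each live object, recording its post-compaction address with CompactibleSpace::forward(), which also switches to the next compaction space when the current one fills up. The sketch below is a heavily simplified version under the hook names from the comment above; it assumes the caller has initialized cp, and it omits the deadwood handling and the _first_dead/_end_of_live bookkeeping that the real scan_and_forward template performs.

// Sketch: the core of phase-2 forwarding (simplified).
template <class SpaceType>
void example_forward_live(SpaceType* space, CompactPoint* cp) {
  HeapWord* compact_top = cp->space->compaction_top();
  HeapWord* cur = space->bottom();
  while (cur < space->scan_limit()) {
    size_t size = space->scanned_block_size(cur);
    if (space->scanned_block_is_obj(cur) && oop(cur)->is_gc_marked()) {
      // Record the forwarding address; forward() advances cp/compact_top
      // to the next compaction space if this object does not fit.
      compact_top = cp->space->forward(oop(cur), size, cp, compact_top);
    }
    cur += size;
  }
  cp->space->set_compaction_top(compact_top);
}
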
 496 
 497 class GenSpaceMangler;
 498 
 499 // A space in which the free area is contiguous.  It therefore supports
 500 // faster allocation, and compaction.
 501 class ContiguousSpace: public CompactibleSpace {
 502   friend class VMStructs;
 503   // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
 504   template <typename SpaceType>
 505   friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);


 592   // calling "cl.do_object_careful" on each. Return NULL if all objects
 593   // in the space (at the start of the iteration) were iterated over.
 594   // Return an address indicating the extent of the iteration in the
 595   // event that the iteration had to return because of finding an
 596   // uninitialized object in the space, or if the closure "cl"
 597   // signaled early termination.
 598   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 599   HeapWord* concurrent_iteration_safe_limit() {
 600     assert(_concurrent_iteration_safe_limit <= top(),
 601            "_concurrent_iteration_safe_limit update missed");
 602     return _concurrent_iteration_safe_limit;
 603   }
 604   // Changes the safe limit; all objects from bottom() to the new
 605   // limit must be properly initialized.
 606   void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
 607     assert(new_limit <= top(), "uninitialized objects in the safe range");
 608     _concurrent_iteration_safe_limit = new_limit;
 609   }
 610 
 611 
 612 #if INCLUDE_CMSGC
 613   // In support of parallel oop_iterate.
 614   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
 615     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 616 
 617     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
 618   #undef ContigSpace_PAR_OOP_ITERATE_DECL
 619 #endif // INCLUDE_CMSGC
 620 
 621   // Compaction support
 622   virtual void reset_after_compaction() {
 623     assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
 624     set_top(compaction_top());
 625     // set new iteration safe limit
 626     set_concurrent_iteration_safe_limit(compaction_top());
 627   }
 628 
 629   // Override.
 630   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
 631                                      CardTable::PrecisionStyle precision,
 632                                      HeapWord* boundary,
 633                                      bool parallel);
 634 
 635   // Apply "blk->do_oop" to the addresses of all reference fields in objects
 636   // starting with the _saved_mark_word, which was noted during a generation's
 637   // save_marks and is required to denote the head of an object.
 638   // Fields in objects allocated by applications of the closure
 639   // *are* included in the iteration.


 643   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
 644 
 645   ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
 646 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
 647 
 648   // Same as object_iterate, but starting from "mark", which is required
 649   // to denote the start of an object.  Objects allocated by
 650   // applications of the closure *are* included in the iteration.
 651   virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
 652 
 653   // Very inefficient implementation.
 654   virtual HeapWord* block_start_const(const void* p) const;
 655   size_t block_size(const HeapWord* p) const;
 656   // If a block is in the allocated area, it is an object.
 657   bool block_is_obj(const HeapWord* p) const { return p < top(); }
 658 
 659   // Addresses for inlined allocation
 660   HeapWord** top_addr() { return &_top; }
 661   HeapWord** end_addr() { return &_end; }
 662 
 663 #if INCLUDE_SERIALGC
 664   // Overrides for more efficient compaction support.
 665   void prepare_for_compaction(CompactPoint* cp);
 666 #endif
 667 
 668   virtual void print_on(outputStream* st) const;
 669 
 670   // Checked dynamic downcasts.
 671   virtual ContiguousSpace* toContiguousSpace() {
 672     return this;
 673   }
 674 
 675   // Debugging
 676   virtual void verify() const;
 677 
 678   // Used to increase collection frequency.  "factor" of 0 means entire
 679   // space.
 680   void allocate_temporary_filler(int factor);
 681 };
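
As a usage note for the new_dcto_cl() override declared above: a card scanner typically asks the space for a DirtyCardToOopClosure and then applies it to each dirty region through the MemRegionClosure interface. A hedged sketch follows; cl, boundary and dirty_mr are placeholders supplied by the caller, and the closure's lifetime management is omitted.

// Sketch: driving a space's dirty-card-to-oop closure over one dirty region.
void example_scan_dirty_region(ContiguousSpace* sp,
                               ExtendedOopClosure* cl,   // field-visiting closure
                               HeapWord* boundary,       // generation boundary
                               MemRegion dirty_mr) {     // one dirty card range
  DirtyCardToOopClosure* dcto =
      sp->new_dcto_cl(cl, CardTable::ObjHeadPreciseArray, boundary,
                      /* parallel */ false);
  dcto->do_MemRegion(dirty_mr);   // applies cl->do_oop to fields in dirty_mr
}
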
 682 
 683 
 684 // A dirty card to oop closure that does filtering.
 685 // It knows how to filter out objects that are outside of the _boundary.
 686 class FilteringDCTOC : public DirtyCardToOopClosure {

