src/share/vm/gc/shared/space.hpp

rev 8961 : [mq]: diff-shenandoah.patch

Old version (before diff-shenandoah.patch):

 350 //
 351 // Additionally, this means that changes to block_size() or block_is_obj() that
 352 // should be effective during the compaction operations must provide a corresponding
 353 // definition of scanned_block_size/scanned_block_is_obj respectively.
 354 class CompactibleSpace: public Space {
 355   friend class VMStructs;
 356   friend class CompactibleFreeListSpace;
 357 private:
 358   HeapWord* _compaction_top;
 359   CompactibleSpace* _next_compaction_space;
 360 
 361   // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
 362   inline size_t adjust_obj_size(size_t size) const {
 363     return size;
 364   }
 365 
 366   inline size_t obj_size(const HeapWord* addr) const {
 367     return oop(addr)->size();
 368   }
 369 
 370 public:
 371   CompactibleSpace() :
 372    _compaction_top(NULL), _next_compaction_space(NULL) {}
 373 
 374   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 375   virtual void clear(bool mangle_space);
 376 
 377   // Used temporarily during a compaction phase to hold the value
 378   // top should have when compaction is complete.
 379   HeapWord* compaction_top() const { return _compaction_top;    }
 380 
 381   void set_compaction_top(HeapWord* value) {
 382     assert(value == NULL || (value >= bottom() && value <= end()),
 383       "should point inside space");
 384     _compaction_top = value;
 385   }
 386 
 387   // Perform operations on the space needed after a compaction
 388   // has been performed.
 389   virtual void reset_after_compaction() = 0;


 425   // returns the first such boundary.
 426   // (The default implementation returns the end of the space, so the
 427   // boundary is never crossed.)
 428   virtual HeapWord* initialize_threshold() { return end(); }
 429 
 430   // "q" is an object of the given "size" that should be forwarded;
 431   // "cp" names the generation ("gen") containing "this" (which must
 432   // also equal "cp->space").  "compact_top" is where in "this" the
 433   // next object should be forwarded to.  If there is room in "this" for
 434   // the object, insert an appropriate forwarding pointer in "q".
 435   // If not, go to the next compaction space (there must
 436   // be one, since compaction must succeed -- we go to the first space of
 437   // the previous generation if necessary, updating "cp"), reset compact_top
 438   // and then forward.  In either case, returns the new value of "compact_top".
 439   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 440   // function of the then-current compaction space, and updates "cp->threshold"
 441   // accordingly.
 442   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 443                     HeapWord* compact_top);
 444 
 445   // Return a size with adjustments as required of the space.
 446   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 447 
 448 protected:
 449   // Used during compaction.
 450   HeapWord* _first_dead;
 451   HeapWord* _end_of_live;
 452 
 453   // Minimum size of a free block.
 454   virtual size_t minimum_free_block_size() const { return 0; }
 455 
 456   // This function is invoked when an allocation of an object covering
 457   // "start" to "end" crosses the threshold; returns the next
 458   // threshold.  (The default implementation does nothing.)
 459   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 460     return end();
 461   }
 462 
 463   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
 464   // free block of the given "word_len", and that "q", were it an object,


 567   void check_mangled_unused_area_complete() PRODUCT_RETURN;
 568 
 569   // Size computations: sizes in bytes.
 570   size_t capacity() const        { return byte_size(bottom(), end()); }
 571   size_t used() const            { return byte_size(bottom(), top()); }
 572   size_t free() const            { return byte_size(top(),    end()); }
 573 
 574   virtual bool is_free_block(const HeapWord* p) const;
 575 
 576   // In a contiguous space we have a more obvious bound on what parts
 577   // contain objects.
 578   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 579 
 580   // Allocation (return NULL if full)
 581   virtual HeapWord* allocate(size_t word_size);
 582   virtual HeapWord* par_allocate(size_t word_size);
 583   HeapWord* allocate_aligned(size_t word_size);
 584 
 585   // Iteration
 586   void oop_iterate(ExtendedOopClosure* cl);
 587   void object_iterate(ObjectClosure* blk);
 588   // For contiguous spaces this method will iterate safely over objects
 589   // in the space (i.e., between bottom and top) when at a safepoint.
 590   void safe_object_iterate(ObjectClosure* blk);
 591 
 592   // Iterate over as many initialized objects in the space as possible,
 593   // calling "cl.do_object_careful" on each. Return NULL if all objects
 594   // in the space (at the start of the iteration) were iterated over.
 595   // Return an address indicating the extent of the iteration in the
 596   // event that the iteration had to return because of finding an
 597   // uninitialized object in the space, or if the closure "cl"
 598   // signaled early termination.
 599   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 600   HeapWord* concurrent_iteration_safe_limit() {
 601     assert(_concurrent_iteration_safe_limit <= top(),
 602            "_concurrent_iteration_safe_limit update missed");
 603     return _concurrent_iteration_safe_limit;
 604   }
 605   // Changes the safe limit; all objects from bottom() to the new
 606   // limit should be properly initialized.
 607   void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {

New version (after diff-shenandoah.patch):

 350 //
 351 // Additionally, this means that changes to block_size() or block_is_obj() that
 352 // should be effective during the compaction operations must provide a corresponding
 353 // definition of scanned_block_size/scanned_block_is_obj respectively.
 354 class CompactibleSpace: public Space {
 355   friend class VMStructs;
 356   friend class CompactibleFreeListSpace;
 357 private:
 358   HeapWord* _compaction_top;
 359   CompactibleSpace* _next_compaction_space;
 360 
 361   // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
 362   inline size_t adjust_obj_size(size_t size) const {
 363     return size;
 364   }
 365 
 366   inline size_t obj_size(const HeapWord* addr) const {
 367     return oop(addr)->size();
 368   }
 369 
 370   inline oop make_oop(HeapWord* addr) const {
 371     return oop(addr);
 372   }
 373 
 374 public:
 375   CompactibleSpace() :
 376    _compaction_top(NULL), _next_compaction_space(NULL) {}
 377 
 378   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 379   virtual void clear(bool mangle_space);
 380 
 381   // Used temporarily during a compaction phase to hold the value
 382   // top should have when compaction is complete.
 383   HeapWord* compaction_top() const { return _compaction_top;    }
 384 
 385   void set_compaction_top(HeapWord* value) {
 386     assert(value == NULL || (value >= bottom() && value <= end()),
 387       "should point inside space");
 388     _compaction_top = value;
 389   }
 390 
 391   // Perform operations on the space needed after a compaction
 392   // has been performed.
 393   virtual void reset_after_compaction() = 0;
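
The patch turns address-to-oop conversion into a hook: the private inline
make_oop() above feeds the scan_and_* templates, and the virtual
compact_oop() below feeds the compaction code. A plausible motivation for a
Shenandoah-style collector is an object layout with a forwarding word ahead
of each object, so the oop does not begin at the block address. A minimal
sketch of such an override, assuming a hypothetical one-word Brooks-style
header (ShenandoahLikeSpace and brooks_ptr_words are illustrative names,
not from the patch):

  // Sketch only: blocks start at the forwarding word, so the oop itself
  // begins one HeapWord past the block address.
  class ShenandoahLikeSpace : public CompactibleSpace {
    static const size_t brooks_ptr_words = 1;  // hypothetical header size
  public:
    virtual oop compact_oop(HeapWord* addr) const {
      return oop(addr + brooks_ptr_words);
    }
  };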


 429   // returns the first such boundary.
 430   // (The default implementation returns the end of the space, so the
 431   // boundary is never crossed.)
 432   virtual HeapWord* initialize_threshold() { return end(); }
 433 
 434   // "q" is an object of the given "size" that should be forwarded;
 435   // "cp" names the generation ("gen") containing "this" (which must
 436   // also equal "cp->space").  "compact_top" is where in "this" the
 437   // next object should be forwarded to.  If there is room in "this" for
 438   // the object, insert an appropriate forwarding pointer in "q".
 439   // If not, go to the next compaction space (there must
 440   // be one, since compaction must succeed -- we go to the first space of
 441   // the previous generation if necessary, updating "cp"), reset compact_top
 442   // and then forward.  In either case, returns the new value of "compact_top".
 443   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 444   // function of the then-current compaction space, and updates "cp->threshold"
 445   // accordingly.
 446   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 447                     HeapWord* compact_top);
 448 
 449   virtual oop compact_oop(HeapWord* addr) const {
 450     return oop(addr);
 451   }
 452 
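For context, forward() is driven by a scan over the live prefix of the
space. A condensed sketch of that loop, modeled loosely on the
scan_and_forward() template these hooks support (marking, dead-space
insertion, and per-object bookkeeping are omitted; "gen" stands for the
target generation supplied by the caller):

  // Sketch: forward every marked object, letting forward() hop to the
  // next compaction space and fire cross_threshold() as needed.
  CompactPoint cp(gen);
  cp.space     = this;
  cp.threshold = cp.space->initialize_threshold();
  HeapWord* compact_top = cp.space->compaction_top();
  for (HeapWord* cur = bottom(); cur < _end_of_live; ) {
    size_t size = obj_size(cur);
    if (make_oop(cur)->is_gc_marked()) {
      compact_top = cp.space->forward(compact_oop(cur), size, &cp, compact_top);
    }
    cur += size;
  }
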
 453   // Return a size with adjustments as required of the space.
 454   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 455 
 456 protected:
 457   // Used during compaction.
 458   HeapWord* _first_dead;
 459   HeapWord* _end_of_live;
 460 
 461   // Minimum size of a free block.
 462   virtual size_t minimum_free_block_size() const { return 0; }
 463 
 464   // This function is invoked when an allocation of an object covering
 465   // "start" to "end" crosses the threshold; returns the next
 466   // threshold.  (The default implementation does nothing.)
 467   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 468     return end();
 469   }
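
The threshold machinery exists so a space backed by a block-offset table
can keep the table current as objects are compacted into it. In the stock
sources, OffsetTableContigSpace overrides the two hooks along roughly these
lines (a sketch; "_offsets" is its block-offset array, and details may
differ in the exact revision shown here):

  // Sketch: record each block that crosses the current threshold, then
  // ask the table where the next threshold lies.
  HeapWord* OffsetTableContigSpace::initialize_threshold() {
    return _offsets.initialize_threshold();
  }
  HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
    _offsets.alloc_block(start, end);
    return _offsets.threshold();
  }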
 470 
 471   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
 472   // free block of the given "word_len", and that "q", were it an object,


 575   void check_mangled_unused_area_complete() PRODUCT_RETURN;
 576 
 577   // Size computations: sizes in bytes.
 578   size_t capacity() const        { return byte_size(bottom(), end()); }
 579   size_t used() const            { return byte_size(bottom(), top()); }
 580   size_t free() const            { return byte_size(top(),    end()); }
 581 
 582   virtual bool is_free_block(const HeapWord* p) const;
 583 
 584   // In a contiguous space we have a more obvious bound on what parts
 585   // contain objects.
 586   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 587 
 588   // Allocation (return NULL if full)
 589   virtual HeapWord* allocate(size_t word_size);
 590   virtual HeapWord* par_allocate(size_t word_size);
 591   HeapWord* allocate_aligned(size_t word_size);
 592 
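par_allocate() must hand out memory without taking a lock; in the stock
sources ContiguousSpace implements it as a CAS loop on the top pointer,
roughly as follows (a sketch; the real code also deals with allocation
alignment and is factored through par_allocate_impl()):

  // Sketch: claim [obj, obj + word_size) by CAS-ing top forward; retry
  // if another thread advanced top first, give up if the space is full.
  HeapWord* obj;
  do {
    obj = top();
    if (pointer_delta(end(), obj) < word_size) {
      return NULL;                               // space is full
    }
    HeapWord* new_top = obj + word_size;
    HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
    if (result == obj) {
      return obj;                                // we won the race
    }
  } while (true);
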
 593   // Iteration
 594   void oop_iterate(ExtendedOopClosure* cl);
 595   virtual void object_iterate(ObjectClosure* blk);
 596   // For contiguous spaces this method will iterate safely over objects
 597   // in the space (i.e., between bottom and top) when at a safepoint.
 598   void safe_object_iterate(ObjectClosure* blk);
 599 
 600   // Iterate over as many initialized objects in the space as possible,
 601   // calling "cl.do_object_careful" on each. Return NULL if all objects
 602   // in the space (at the start of the iteration) were iterated over.
 603   // Return an address indicating the extent of the iteration in the
 604   // event that the iteration had to return because of finding an
 605   // uninitialized object in the space, or if the closure "cl"
 606   // signaled early termination.
 607   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 608   HeapWord* concurrent_iteration_safe_limit() {
 609     assert(_concurrent_iteration_safe_limit <= top(),
 610            "_concurrent_iteration_safe_limit update missed");
 611     return _concurrent_iteration_safe_limit;
 612   }
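
The safe limit lets a concurrent scanner run while mutators are still
allocating: everything below the limit is guaranteed to be a fully
initialized object, anything above it may be a half-built one. A minimal
usage sketch (the closure "cl" and the space pointer are assumed):

  // Sketch: walk only the prefix of the space known to hold fully
  // initialized objects.
  HeapWord* cur   = space->bottom();
  HeapWord* limit = space->concurrent_iteration_safe_limit();
  while (cur < limit) {
    oop obj = oop(cur);
    cl->do_object(obj);
    cur += obj->size();
  }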
 613   // Changes the safe limit; all objects from bottom() to the new
 614   // limit should be properly initialized.
 615   void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {

