
src/share/vm/gc/g1/heapRegion.hpp

  26 #define SHARE_VM_GC_G1_HEAPREGION_HPP
  27 
  28 #include "gc/g1/g1AllocationContext.hpp"
  29 #include "gc/g1/g1BlockOffsetTable.hpp"
  30 #include "gc/g1/heapRegionType.hpp"
  31 #include "gc/g1/survRateGroup.hpp"
  32 #include "gc/shared/ageTable.hpp"
  33 #include "gc/shared/spaceDecorator.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  37 // can be collected independently.
  38 
  39 // NOTE: Although a HeapRegion is a Space, its
  40 // Space::initDirtyCardClosure method must not be called.
  41 // The problem is that the existence of this method breaks
  42 // the independence of barrier sets from remembered sets.
  43 // The solution is to remove this method from the definition
  44 // of a Space.
  45 
  46 class G1CollectedHeap;
  47 class HeapRegionRemSet;
  48 class HeapRegionRemSetIterator;
  49 class HeapRegion;
  50 class HeapRegionSetBase;
  51 class nmethod;
  52 
  53 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
  54 #define HR_FORMAT_PARAMS(_hr_) \
  55                 (_hr_)->hrm_index(), \
  56                 (_hr_)->get_short_type_str(), \
  57                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  58 
  59 // sentinel value for hrm_index
  60 #define G1_NO_HRM_INDEX ((uint) -1)
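
HR_FORMAT and HR_FORMAT_PARAMS are meant to be used as a pair in printf-style output. A minimal sketch, assuming an outputStream (utilities/ostream.hpp) and a hypothetical helper name that is not part of this file:

    // Illustration only: prints e.g. "12:(E)[0x...,0x...,0x...]" --
    // region index, short type string, then the bottom/top/end pointers.
    static void print_one_region(outputStream* st, HeapRegion* hr) {
      st->print_cr("region " HR_FORMAT, HR_FORMAT_PARAMS(hr));
    }
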
  61 
  62 // A dirty card to oop closure for heap regions. It
  63 // knows how to get the G1 heap and how to use the bitmap
  64 // in the concurrent marker used by G1 to filter remembered
  65 // sets.


 372   uint hrm_index() const { return _hrm_index; }
 373 
 374   // The number of bytes marked live in the region in the last marking phase.
 375   size_t marked_bytes()    { return _prev_marked_bytes; }
 376   size_t live_bytes() {
 377     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 378   }
 379 
 380   // The number of bytes counted in the next marking.
 381   size_t next_marked_bytes() { return _next_marked_bytes; }
 382   // The number of bytes live wrt the next marking.
 383   size_t next_live_bytes() {
 384     return
 385       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 386   }
 387 
 388   // A lower bound on the amount of garbage bytes in the region.
 389   size_t garbage_bytes() {
 390     size_t used_at_mark_start_bytes =
 391       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
 392     assert(used_at_mark_start_bytes >= marked_bytes(),
 393            "Can't mark more than we have.");
 394     return used_at_mark_start_bytes - marked_bytes();
 395   }
 396 
 397   // Return the amount of bytes we'll reclaim if we collect this
 398   // region. This includes not only the known garbage bytes in the
 399   // region but also any unallocated space in it, i.e., [top, end),
 400   // since it will also be reclaimed if we collect the region.
 401   size_t reclaimable_bytes() {
 402     size_t known_live_bytes = live_bytes();
 403     assert(known_live_bytes <= capacity(), "sanity");
 404     return capacity() - known_live_bytes;
 405   }
 406 
 407   // An upper bound on the number of live bytes in the region.
 408   size_t max_live_bytes() { return used() - garbage_bytes(); }
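
A worked example of how these accessors fit together, with purely illustrative numbers: a 1M region whose prev_top_at_mark_start() sits 512K above bottom(), whose top() sits 768K above bottom(), and in which the last marking found 256K of live data below the previous TAMS:

    //   marked_bytes()      == 256K
    //   live_bytes()        == (768K - 512K) + 256K == 512K   // above-TAMS + marked
    //   garbage_bytes()     == 512K - 256K          == 256K   // a lower bound
    //   reclaimable_bytes() == 1M - 512K            == 512K   // garbage + [top, end)
    //   max_live_bytes()    == 768K - 256K          == 512K   // used() - garbage_bytes()
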
 409 
 410   void add_to_marked_bytes(size_t incr_bytes) {
 411     _next_marked_bytes = _next_marked_bytes + incr_bytes;
 412     assert(_next_marked_bytes <= used(), "invariant" );
 413   }
 414 
 415   void zero_marked_bytes()      {
 416     _prev_marked_bytes = _next_marked_bytes = 0;
 417   }
 418 
 419   const char* get_type_str() const { return _type.get_str(); }
 420   const char* get_short_type_str() const { return _type.get_short_str(); }
 421 
 422   bool is_free() const { return _type.is_free(); }
 423 
 424   bool is_young()    const { return _type.is_young();    }
 425   bool is_eden()     const { return _type.is_eden();     }
 426   bool is_survivor() const { return _type.is_survivor(); }
 427 
 428   bool is_humongous() const { return _type.is_humongous(); }
 429   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 430   bool is_continues_humongous() const { return _type.is_continues_humongous();   }
 431 
 432   bool is_old() const { return _type.is_old(); }
 433 
 434   // A pinned region contains objects which are not moved by garbage collections.
 435   // Humongous regions and archive regions are pinned.
 436   bool is_pinned() const { return _type.is_pinned(); }
 437 
 438   // An archive region is a pinned region, also tagged as old, which
 439   // should not be marked during mark/sweep. This allows the address
 440   // space to be shared by JVM instances.
 441   bool is_archive() const { return _type.is_archive(); }
 442 
 443   // For a humongous region, the region in which it starts.
 444   HeapRegion* humongous_start_region() const {
 445     return _humongous_start_region;
 446   }
 447 
 448   // Return the number of distinct regions that are covered by this region:
 449   // 1 if the region is not humongous, >= 1 if the region is humongous.
 450   uint region_num() const {
 451     if (!is_humongous()) {
 452       return 1U;
 453     } else {
 454       assert(is_starts_humongous(), "doesn't make sense on HC regions");
 455       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 456       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 457     }
 458   }
 459 
 460   // Return the index + 1 of the last HC region that's associated
 461   // with this HS region.
 462   uint last_hc_index() const {
 463     assert(is_starts_humongous(), "don't call this otherwise");
 464     return hrm_index() + region_num();
 465   }
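
A worked example with illustrative numbers: assuming 1M regions and a humongous object of about 2.5M whose "starts humongous" region has hrm_index() == 10, the series covers three regions, so:

    //   capacity()      == 3M                            // spans the whole series here
    //   region_num()    == 3M >> LogOfHRGrainBytes == 3
    //   last_hc_index() == 10 + 3 == 13                  // regions 11 and 12 continue
    //                                                    // the object; 13 is past it
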
 466 
 467   // Same as Space::is_in_reserved, but will use the original size of the region.
 468   // The original size is different only for starts humongous regions. They get
 469   // their _end set up to be the end of the last continues region of the
 470   // corresponding humongous object.
 471   bool is_in_reserved_raw(const void* p) const {
 472     return _bottom <= p && p < orig_end();
 473   }
 474 
 475   // Makes the current region be a "starts humongous" region, i.e.,
 476   // the first region in a series of one or more contiguous regions
 477   // that will contain a single "humongous" object. The two parameters
 478   // are as follows:
 479   //
 480   // new_top : The new value of the top field of this region which
 481   // points to the end of the humongous object that's being
 482   // allocated. If there is more than one region in the series, top
 483   // will lie beyond this region's original end field and within the last
 484   // region in the series.
 485   //
 486   // new_end : The new value of the end field of this region which
 487   // points to the end of the last region in the series. If there is
 488   // one region in the series (namely: this one) end will be the same
 489   // as the original end of this region.
 490   //
 491   // Updating top and end as described above makes this region look as
 492   // if it spans the entire space taken up by all the regions in the
 493   // series and a single allocation moved its top to new_top. This
 494   // ensures that the space (capacity / allocated) taken up by all
 495   // humongous regions can be calculated by just looking at the
 496   // "starts humongous" regions and by ignoring the "continues
 497   // humongous" regions.
 498   void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
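
For the same illustrative 2.5M object, with B standing for this region's bottom() and byte offsets standing in for the corresponding HeapWord* addresses, the call would look roughly like:

    set_starts_humongous(/* new_top */ B + 2.5M,   // end of the humongous object
                         /* new_end */ B + 3M);    // end of the last region in the series
    // Afterwards this region reports capacity() == 3M and used() == 2.5M, and the
    // two "continues humongous" regions that follow are ignored for accounting.
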
 499 
 500   // Makes the current region be a "continues humongous"
 501   // region. first_hr is the "start humongous" region of the series
 502   // which this region will be part of.
 503   void set_continues_humongous(HeapRegion* first_hr);
 504 
 505   // Unsets the humongous-related fields on the region.
 506   void clear_humongous();
 507 
 508   // If the region has a remembered set, return a pointer to it.
 509   HeapRegionRemSet* rem_set() const {
 510     return _rem_set;
 511   }
 512 
 513   inline bool in_collection_set() const;
 514 
 515   inline HeapRegion* next_in_collection_set() const;
 516   inline void set_next_in_collection_set(HeapRegion* r);
 517 
 518   void set_allocation_context(AllocationContext_t context) {


 549   }
 550 
 551   HeapRegionSetBase* containing_set() { return _containing_set; }
 552 #else // ASSERT
 553   void set_containing_set(HeapRegionSetBase* containing_set) { }
 554 
 555   // containing_set() is only used in asserts so there's no reason
 556   // to provide a dummy version of it.
 557 #endif // ASSERT
 558 
 559   HeapRegion* get_next_young_region() { return _next_young_region; }
 560   void set_next_young_region(HeapRegion* hr) {
 561     _next_young_region = hr;
 562   }
 563 
 564   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
 565   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
 566   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
 567   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 568 
 569   // For the start region of a humongous sequence, its original end().
 570   HeapWord* orig_end() const { return _bottom + GrainWords; }
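
Tying orig_end() back to the earlier illustrative 2.5M object (B again stands for bottom()):

    //   end()      == B + 3M   // stretched by set_starts_humongous()
    //   orig_end() == B + 1M   // always bottom() + GrainWords
    //   is_in_reserved_raw(p) tests p against orig_end(), not the stretched end()
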
 571 
 572   // Reset HR stuff to default values.
 573   void hr_clear(bool par, bool clear_space, bool locked = false);
 574   void par_clear();
 575 
 576   // Get the start of the unmarked area in this region.
 577   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
 578   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 579 
 580   // Note the start or end of marking. This tells the heap region
 581   // that the collector is about to start or has finished (concurrently)
 582   // marking the heap.
 583 
 584   // Notify the region that concurrent marking is starting. Initialize
 585   // all fields related to the next marking info.
 586   inline void note_start_of_marking();
 587 
 588   // Notify the region that concurrent marking has finished. Copy the
 589   // (now finalized) next marking info fields into the prev marking
 590   // info fields.
 591   inline void note_end_of_marking();


 597   // Notify the region that it ceases to be to-space during a GC and
 598   // that we will not copy objects into it any more.
 599   inline void note_end_of_copying(bool during_initial_mark);
 600 
 601   // Notify the region that we are about to start processing
 602   // self-forwarded objects during evac failure handling.
 603   void note_self_forwarding_removal_start(bool during_initial_mark,
 604                                           bool during_conc_mark);
 605 
 606   // Notify the region that we have finished processing self-forwarded
 607   // objects during evac failure handling.
 608   void note_self_forwarding_removal_end(bool during_initial_mark,
 609                                         bool during_conc_mark,
 610                                         size_t marked_bytes);
 611 
 612   // Returns "false" iff no object in the region was allocated when the
 613   // last mark phase ended.
 614   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 615 
 616   void reset_during_compaction() {
 617     assert(is_starts_humongous(),
 618            "should only be called for starts humongous regions");
 619 
 620     zero_marked_bytes();
 621     init_top_at_mark_start();
 622   }
 623 
 624   void calc_gc_efficiency(void);
 625   double gc_efficiency() { return _gc_efficiency;}
 626 
 627   int  young_index_in_cset() const { return _young_index_in_cset; }
 628   void set_young_index_in_cset(int index) {
 629     assert( (index == -1) || is_young(), "pre-condition" );
 630     _young_index_in_cset = index;
 631   }
 632 
 633   int age_in_surv_rate_group() {
 634     assert( _surv_rate_group != NULL, "pre-condition" );
 635     assert( _age_index > -1, "pre-condition" );
 636     return _surv_rate_group->age_in_group(_age_index);
 637   }
 638 




  26 #define SHARE_VM_GC_G1_HEAPREGION_HPP
  27 
  28 #include "gc/g1/g1AllocationContext.hpp"
  29 #include "gc/g1/g1BlockOffsetTable.hpp"
  30 #include "gc/g1/heapRegionType.hpp"
  31 #include "gc/g1/survRateGroup.hpp"
  32 #include "gc/shared/ageTable.hpp"
  33 #include "gc/shared/spaceDecorator.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 // A HeapRegion is the smallest piece of a G1CollectedHeap that
  37 // can be collected independently.
  38 
  39 // NOTE: Although a HeapRegion is a Space, its
  40 // Space::initDirtyCardClosure method must not be called.
  41 // The problem is that the existence of this method breaks
  42 // the independence of barrier sets from remembered sets.
  43 // The solution is to remove this method from the definition
  44 // of a Space.
  45 
  46 // Each heap region is self-contained. top() and end() can never
  47 // be set beyond the end of the region. For humongous objects,
  48 // the first region is a StartsHumongous region. If the humongous
  49 // object is larger than a heap region, the following regions will
  50 // be of type ContinuesHumongous. In this case the top() and end()
  51 // of the StartsHumongous region will point to the end of that region.
  52 // The same will be true for all ContinuesHumongous regions except
  53 // the last, which will have its top() at the object's top.
  54 
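
A worked example of the layout described above, again assuming 1M regions and an illustrative humongous object of about 2.5M spanning regions N, N+1 and N+2:

    //   region N   (StartsHumongous):    top() == end()             // completely full
    //   region N+1 (ContinuesHumongous): top() == end()             // completely full
    //   region N+2 (ContinuesHumongous): top() == bottom() + 0.5M   // the object's top
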
  55 class G1CollectedHeap;
  56 class HeapRegionRemSet;
  57 class HeapRegionRemSetIterator;
  58 class HeapRegion;
  59 class HeapRegionSetBase;
  60 class nmethod;
  61 
  62 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
  63 #define HR_FORMAT_PARAMS(_hr_) \
  64                 (_hr_)->hrm_index(), \
  65                 (_hr_)->get_short_type_str(), \
  66                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
  67 
  68 // sentinel value for hrm_index
  69 #define G1_NO_HRM_INDEX ((uint) -1)
  70 
  71 // A dirty card to oop closure for heap regions. It
  72 // knows how to get the G1 heap and how to use the bitmap
  73 // in the concurrent marker used by G1 to filter remembered
  74 // sets.


 381   uint hrm_index() const { return _hrm_index; }
 382 
 383   // The number of bytes marked live in the region in the last marking phase.
 384   size_t marked_bytes()    { return _prev_marked_bytes; }
 385   size_t live_bytes() {
 386     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 387   }
 388 
 389   // The number of bytes counted in the next marking.
 390   size_t next_marked_bytes() { return _next_marked_bytes; }
 391   // The number of bytes live wrt the next marking.
 392   size_t next_live_bytes() {
 393     return
 394       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 395   }
 396 
 397   // A lower bound on the amount of garbage bytes in the region.
 398   size_t garbage_bytes() {
 399     size_t used_at_mark_start_bytes =
 400       (prev_top_at_mark_start() - bottom()) * HeapWordSize;


 401     return used_at_mark_start_bytes - marked_bytes();
 402   }
 403 
 404   // Return the amount of bytes we'll reclaim if we collect this
 405   // region. This includes not only the known garbage bytes in the
 406   // region but also any unallocated space in it, i.e., [top, end),
 407   // since it will also be reclaimed if we collect the region.
 408   size_t reclaimable_bytes() {
 409     size_t known_live_bytes = live_bytes();
 410     assert(known_live_bytes <= capacity(), "sanity");
 411     return capacity() - known_live_bytes;
 412   }
 413 
 414   // An upper bound on the number of live bytes in the region.
 415   size_t max_live_bytes() { return used() - garbage_bytes(); }
 416 
 417   void add_to_marked_bytes(size_t incr_bytes) {
 418     _next_marked_bytes = _next_marked_bytes + incr_bytes;

 419   }
 420 
 421   void zero_marked_bytes()      {
 422     _prev_marked_bytes = _next_marked_bytes = 0;
 423   }
 424 
 425   const char* get_type_str() const { return _type.get_str(); }
 426   const char* get_short_type_str() const { return _type.get_short_str(); }
 427 
 428   bool is_free() const { return _type.is_free(); }
 429 
 430   bool is_young()    const { return _type.is_young();    }
 431   bool is_eden()     const { return _type.is_eden();     }
 432   bool is_survivor() const { return _type.is_survivor(); }
 433 
 434   bool is_humongous() const { return _type.is_humongous(); }
 435   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 436   bool is_continues_humongous() const { return _type.is_continues_humongous();   }
 437 
 438   bool is_old() const { return _type.is_old(); }
 439 
 440   // A pinned region contains objects which are not moved by garbage collections.
 441   // Humongous regions and archive regions are pinned.
 442   bool is_pinned() const { return _type.is_pinned(); }
 443 
 444   // An archive region is a pinned region, also tagged as old, which
 445   // should not be marked during mark/sweep. This allows the address
 446   // space to be shared by JVM instances.
 447   bool is_archive() const { return _type.is_archive(); }
 448 
 449   // For a humongous region, the region in which it starts.
 450   HeapRegion* humongous_start_region() const {
 451     return _humongous_start_region;
 452   }
 453 
 454   // Makes the current region be a "starts humongous" region, i.e.,
 455   // the first region in a series of one or more contiguous regions
 456   // that will contain a single "humongous" object.
 457   //
 458   // obj_top : points to the end of the humongous object that's being
 459   // allocated.
 460   void set_starts_humongous(HeapWord* obj_top);
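
A sketch of the corresponding call for the same illustrative object; the variable names are assumptions made up for the example:

    // obj_bottom is the first region's bottom(), obj_size_in_words the object's size.
    first_hr->set_starts_humongous(obj_bottom + obj_size_in_words);
    // Per the file comment above, this region's own top() and end() nevertheless stay
    // within its own 1M bounds; only the last region of the series ends up with
    // top() below end().
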
 461 
 462   // Makes the current region be a "continues humongous"
 463   // region. first_hr is the "start humongous" region of the series
 464   // which this region will be part of.
 465   void set_continues_humongous(HeapRegion* first_hr);
 466 
 467   // Unsets the humongous-related fields on the region.
 468   void clear_humongous();
 469 
 470   // If the region has a remembered set, return a pointer to it.
 471   HeapRegionRemSet* rem_set() const {
 472     return _rem_set;
 473   }
 474 
 475   inline bool in_collection_set() const;
 476 
 477   inline HeapRegion* next_in_collection_set() const;
 478   inline void set_next_in_collection_set(HeapRegion* r);
 479 
 480   void set_allocation_context(AllocationContext_t context) {


 511   }
 512 
 513   HeapRegionSetBase* containing_set() { return _containing_set; }
 514 #else // ASSERT
 515   void set_containing_set(HeapRegionSetBase* containing_set) { }
 516 
 517   // containing_set() is only used in asserts so there's no reason
 518   // to provide a dummy version of it.
 519 #endif // ASSERT
 520 
 521   HeapRegion* get_next_young_region() { return _next_young_region; }
 522   void set_next_young_region(HeapRegion* hr) {
 523     _next_young_region = hr;
 524   }
 525 
 526   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
 527   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
 528   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
 529   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 530 



 531   // Reset HR stuff to default values.
 532   void hr_clear(bool par, bool clear_space, bool locked = false);
 533   void par_clear();
 534 
 535   // Get the start of the unmarked area in this region.
 536   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
 537   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 538 
 539   // Note the start or end of marking. This tells the heap region
 540   // that the collector is about to start or has finished (concurrently)
 541   // marking the heap.
 542 
 543   // Notify the region that concurrent marking is starting. Initialize
 544   // all fields related to the next marking info.
 545   inline void note_start_of_marking();
 546 
 547   // Notify the region that concurrent marking has finished. Copy the
 548   // (now finalized) next marking info fields into the prev marking
 549   // info fields.
 550   inline void note_end_of_marking();


 556   // Notify the region that it ceases to be to-space during a GC and
 557   // that we will not copy objects into it any more.
 558   inline void note_end_of_copying(bool during_initial_mark);
 559 
 560   // Notify the region that we are about to start processing
 561   // self-forwarded objects during evac failure handling.
 562   void note_self_forwarding_removal_start(bool during_initial_mark,
 563                                           bool during_conc_mark);
 564 
 565   // Notify the region that we have finished processing self-forwarded
 566   // objects during evac failure handling.
 567   void note_self_forwarding_removal_end(bool during_initial_mark,
 568                                         bool during_conc_mark,
 569                                         size_t marked_bytes);
 570 
 571   // Returns "false" iff no object in the region was allocated when the
 572   // last mark phase ended.
 573   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 574 
 575   void reset_during_compaction() {
 576     assert(is_humongous(),
 577            "should only be called for humongous regions");
 578 
 579     zero_marked_bytes();
 580     init_top_at_mark_start();
 581   }
 582 
 583   void calc_gc_efficiency(void);
 584   double gc_efficiency() { return _gc_efficiency;}
 585 
 586   int  young_index_in_cset() const { return _young_index_in_cset; }
 587   void set_young_index_in_cset(int index) {
 588     assert( (index == -1) || is_young(), "pre-condition" );
 589     _young_index_in_cset = index;
 590   }
 591 
 592   int age_in_surv_rate_group() {
 593     assert( _surv_rate_group != NULL, "pre-condition" );
 594     assert( _age_index > -1, "pre-condition" );
 595     return _surv_rate_group->age_in_group(_age_index);
 596   }
 597 

