
src/share/vm/gc/g1/heapRegion.hpp

 372   uint hrm_index() const { return _hrm_index; }
 373 
 374   // The number of bytes marked live in the region in the last marking phase.
 375   size_t marked_bytes()    { return _prev_marked_bytes; }
 376   size_t live_bytes() {
 377     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 378   }
 379 
 380   // The number of bytes counted in the next marking.
 381   size_t next_marked_bytes() { return _next_marked_bytes; }
 382   // The number of bytes live with respect to the next marking.
 383   size_t next_live_bytes() {
 384     return
 385       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 386   }
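       // A worked illustration of the accounting above (values invented,
       // not from the source): if the last marking found 100K bytes live
       // below prev_top_at_mark_start(), and allocation has since advanced
       // top() by 20K bytes, live_bytes() reports 120K -- anything
       // allocated above the top-at-mark-start is treated as live by
       // construction.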
 387 
 388   // A lower bound on the number of garbage bytes in the region.
 389   size_t garbage_bytes() {
 390     size_t used_at_mark_start_bytes =
 391       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
 392     assert(used_at_mark_start_bytes >= marked_bytes(),
 393            "Can't mark more than we have.");
 394     return used_at_mark_start_bytes - marked_bytes();
 395   }
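       // Continuing the illustrative numbers above: if
       // prev_top_at_mark_start() lies 150K bytes above bottom() and
       // marked_bytes() is 100K, garbage_bytes() is 50K. It is only a
       // lower bound because objects that died after marking started are
       // not yet counted as garbage.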
 396 
 397   // Return the number of bytes we'll reclaim if we collect this
 398   // region. This includes not only the known garbage bytes in the
 399   // region but also any unallocated space in it, i.e., [top, end),
 400   // since it will also be reclaimed if we collect the region.
 401   size_t reclaimable_bytes() {
 402     size_t known_live_bytes = live_bytes();
 403     assert(known_live_bytes <= capacity(), "sanity");
 404     return capacity() - known_live_bytes;
 405   }
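       // For an ordinary (non-humongous) region capacity() is GrainBytes,
       // so with the illustrative numbers above and 1M regions this would
       // be 1M - 120K: the known garbage plus all unallocated space in
       // [top, end).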
 406 
 407   // An upper bound on the number of live bytes in the region.
 408   size_t max_live_bytes() { return used() - garbage_bytes(); }
 409 
 410   void add_to_marked_bytes(size_t incr_bytes) {
 411     _next_marked_bytes = _next_marked_bytes + incr_bytes;
 412     assert(_next_marked_bytes <= used(), "invariant" );
 413   }
 414 
 415   void zero_marked_bytes()      {
 416     _prev_marked_bytes = _next_marked_bytes = 0;
 417   }
 418 
 419   const char* get_type_str() const { return _type.get_str(); }
 420   const char* get_short_type_str() const { return _type.get_short_str(); }
 421 
 422   bool is_free() const { return _type.is_free(); }
 423 
 424   bool is_young()    const { return _type.is_young();    }
 425   bool is_eden()     const { return _type.is_eden();     }
 426   bool is_survivor() const { return _type.is_survivor(); }
 427 
 428   bool is_humongous() const { return _type.is_humongous(); }
 429   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 430   bool is_continues_humongous() const { return _type.is_continues_humongous();   }
 431 
 432   bool is_old() const { return _type.is_old(); }
 433 
 434   // A pinned region contains objects which are not moved by garbage collections.
 435   // Humongous regions and archive regions are pinned.
 436   bool is_pinned() const { return _type.is_pinned(); }
 437 
 438   // An archive region is a pinned region, also tagged as old, which
 439   // should not be marked during mark/sweep. This allows the address
 440   // space to be shared by JVM instances.
 441   bool is_archive() const { return _type.is_archive(); }
 442 
 443   // For a humongous region, the region in which it starts.
 444   HeapRegion* humongous_start_region() const {
 445     return _humongous_start_region;
 446   }
 447 
 448   // Return the number of distinct regions that are covered by this region:
 449   // 1 if the region is not humongous, >= 1 if the region is humongous.
 450   uint region_num() const {
 451     if (!is_humongous()) {
 452       return 1U;
 453     } else {
 454       assert(is_starts_humongous(), "doesn't make sense on HC regions");
 455       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 456       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 457     }
 458   }
 459 
 460   // Return the index + 1 of the last HC region that's associated
 461   // with this HS region.
 462   uint last_hc_index() const {
 463     assert(is_starts_humongous(), "don't call this otherwise");
 464     return hrm_index() + region_num();
 465   }
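       // Worked example (sizes illustrative): with GrainBytes = 1M, the
       // "starts humongous" region of a 3.5M object has its capacity()
       // stretched over four regions, so region_num() returns 4; if its
       // hrm_index() is 10, last_hc_index() returns 14, one past the last
       // "continues humongous" region (indices 11 through 13).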
 466 
 467   // Same as Space::is_in_reserved, but uses the original size of the region.
 468   // The original size differs only for "starts humongous" regions: their
 469   // _end is set to the end of the last "continues humongous" region of the
 470   // corresponding humongous object.
 471   bool is_in_reserved_raw(const void* p) const {
 472     return _bottom <= p && p < orig_end();
 473   }
 474 
 475   // Makes the current region be a "starts humongous" region, i.e.,
 476   // the first region in a series of one or more contiguous regions
 477   // that will contain a single "humongous" object. The two parameters
 478   // are as follows:
 479   //
 480   // new_top : The new value of the top field of this region which
 481   // points to the end of the humongous object that's being
 482   // allocated. If there is more than one region in the series, top
 483   // will lie beyond this region's original end field and on the last
 484   // region in the series.
 485   //
 486   // new_end : The new value of the end field of this region which
 487   // points to the end of the last region in the series. If there is
 488   // one region in the series (namely: this one) end will be the same
 489   // as the original end of this region.
 490   //
 491   // Updating top and end as described above makes this region look as
 492   // if it spans the entire space taken up by all the regions in the
 493   // series and a single allocation moved its top to new_top. This
 494   // ensures that the space (capacity / allocated) taken up by all
 495   // humongous regions can be calculated by just looking at the
 496   // "starts humongous" regions and by ignoring the "continues
 497   // humongous" regions.
 498   void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
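       // A sketch of the resulting layout (hypothetical values): allocating
       // a 2.5M humongous object with 1M regions claims three contiguous
       // regions; on the first one, set_starts_humongous() moves end from
       // bottom + 1M to new_end = bottom + 3M and top to new_top =
       // bottom + 2.5M, so used() and capacity() for the whole series can
       // be read off this region alone.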
 499 
 500   // Makes the current region be a "continues humongous"
 501   // region. first_hr is the "start humongous" region of the series
 502   // which this region will be part of.
 503   void set_continues_humongous(HeapRegion* first_hr);
 504 
 505   // Unsets the humongous-related fields on the region.
 506   void clear_humongous();
 507 
 508   // If the region has a remembered set, return a pointer to it.
 509   HeapRegionRemSet* rem_set() const {
 510     return _rem_set;
 511   }
 512 
 513   inline bool in_collection_set() const;
 514 
 515   inline HeapRegion* next_in_collection_set() const;
 516   inline void set_next_in_collection_set(HeapRegion* r);
 517 
 518   void set_allocation_context(AllocationContext_t context) {


 549   }
 550 
 551   HeapRegionSetBase* containing_set() { return _containing_set; }
 552 #else // ASSERT
 553   void set_containing_set(HeapRegionSetBase* containing_set) { }
 554 
 555   // containing_set() is only used in asserts so there's no reason
 556   // to provide a dummy version of it.
 557 #endif // ASSERT
 558 
 559   HeapRegion* get_next_young_region() { return _next_young_region; }
 560   void set_next_young_region(HeapRegion* hr) {
 561     _next_young_region = hr;
 562   }
 563 
 564   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
 565   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
 566   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
 567   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 568 
 569   // For the start region of a humongous sequence, its original end().
 570   HeapWord* orig_end() const { return _bottom + GrainWords; }
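       // e.g., even after set_starts_humongous() has moved _end past the
       // region boundary, orig_end() still returns bottom() + GrainWords,
       // which is what is_in_reserved_raw() above relies on.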
 571 
 572   // Reset the region's fields to their default values.
 573   void hr_clear(bool par, bool clear_space, bool locked = false);
 574   void par_clear();
 575 
 576   // Get the start of the unmarked area in this region.
 577   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
 578   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 579 
 580   // Note the start or end of marking. This tells the heap region
 581   // that the collector is about to start or has finished (concurrently)
 582   // marking the heap.
 583 
 584   // Notify the region that concurrent marking is starting. Initialize
 585   // all fields related to the next marking info.
 586   inline void note_start_of_marking();
 587 
 588   // Notify the region that concurrent marking has finished. Copy the
 589   // (now finalized) next marking info fields into the prev marking
 590   // info fields.
 591   inline void note_end_of_marking();
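       // Per the comments above, the expected call sequence for one
       // concurrent cycle is (illustrative, not a complete contract):
       //   note_start_of_marking();  // next TAMS := top(), next marked bytes := 0
       //   ... concurrent marking accumulates _next_marked_bytes ...
       //   note_end_of_marking();    // prev fields := finalized next fields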


 597   // Notify the region that it ceases being to-space during a GC and
 598   // we will not copy objects into it any more.
 599   inline void note_end_of_copying(bool during_initial_mark);
 600 
 601   // Notify the region that we are about to start processing
 602   // self-forwarded objects during evac failure handling.
 603   void note_self_forwarding_removal_start(bool during_initial_mark,
 604                                           bool during_conc_mark);
 605 
 606   // Notify the region that we have finished processing self-forwarded
 607   // objects during evac failure handling.
 608   void note_self_forwarding_removal_end(bool during_initial_mark,
 609                                         bool during_conc_mark,
 610                                         size_t marked_bytes);
 611 
 612   // Returns "false" iff no object in the region had been allocated when the
 613   // last mark phase started.
 614   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 615 
 616   void reset_during_compaction() {
 617     assert(is_starts_humongous(),
 618            "should only be called for starts humongous regions");
 619 
 620     zero_marked_bytes();
 621     init_top_at_mark_start();
 622   }
 623 
 624   void calc_gc_efficiency(void);
 625   double gc_efficiency() { return _gc_efficiency; }
 626 
 627   int  young_index_in_cset() const { return _young_index_in_cset; }
 628   void set_young_index_in_cset(int index) {
 629     assert( (index == -1) || is_young(), "pre-condition" );
 630     _young_index_in_cset = index;
 631   }
 632 
 633   int age_in_surv_rate_group() {
 634     assert( _surv_rate_group != NULL, "pre-condition" );
 635     assert( _age_index > -1, "pre-condition" );
 636     return _surv_rate_group->age_in_group(_age_index);
 637   }
 638 
 639   void record_surv_words_in_group(size_t words_survived) {




 372   uint hrm_index() const { return _hrm_index; }
 373 
 374   // The number of bytes marked live in the region in the last marking phase.
 375   size_t marked_bytes()    { return _prev_marked_bytes; }
 376   size_t live_bytes() {
 377     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
 378   }
 379 
 380   // The number of bytes counted in the next marking.
 381   size_t next_marked_bytes() { return _next_marked_bytes; }
 382   // The number of bytes live with respect to the next marking.
 383   size_t next_live_bytes() {
 384     return
 385       (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
 386   }
 387 
 388   // A lower bound on the number of garbage bytes in the region.
 389   size_t garbage_bytes() {
 390     size_t used_at_mark_start_bytes =
 391       (prev_top_at_mark_start() - bottom()) * HeapWordSize;


 392     return used_at_mark_start_bytes - marked_bytes();
 393   }
 394 
 395   // Return the number of bytes we'll reclaim if we collect this
 396   // region. This includes not only the known garbage bytes in the
 397   // region but also any unallocated space in it, i.e., [top, end),
 398   // since it will also be reclaimed if we collect the region.
 399   size_t reclaimable_bytes() {
 400     size_t known_live_bytes = live_bytes();
 401     assert(known_live_bytes <= capacity(), "sanity");
 402     return capacity() - known_live_bytes;
 403   }
 404 
 405   // An upper bound on the number of live bytes in the region.
 406   size_t max_live_bytes() { return used() - garbage_bytes(); }
 407 
 408   void add_to_marked_bytes(size_t incr_bytes) {
 409     _next_marked_bytes = _next_marked_bytes + incr_bytes;

 410   }
 411 
 412   void zero_marked_bytes()      {
 413     _prev_marked_bytes = _next_marked_bytes = 0;
 414   }
 415 
 416   const char* get_type_str() const { return _type.get_str(); }
 417   const char* get_short_type_str() const { return _type.get_short_str(); }
 418 
 419   bool is_free() const { return _type.is_free(); }
 420 
 421   bool is_young()    const { return _type.is_young();    }
 422   bool is_eden()     const { return _type.is_eden();     }
 423   bool is_survivor() const { return _type.is_survivor(); }
 424 
 425   bool is_humongous() const { return _type.is_humongous(); }
 426   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 427   bool is_continues_humongous() const { return _type.is_continues_humongous();   }
 428 
 429   bool is_old() const { return _type.is_old(); }
 430 
 431   // A pinned region contains objects which are not moved by garbage collections.
 432   // Humongous regions and archive regions are pinned.
 433   bool is_pinned() const { return _type.is_pinned(); }
 434 
 435   // An archive region is a pinned region, also tagged as old, which
 436   // should not be marked during mark/sweep. This allows the address
 437   // space to be shared by JVM instances.
 438   bool is_archive() const { return _type.is_archive(); }
 439 
 440   // For a humongous region, the region in which it starts.
 441   HeapRegion* humongous_start_region() const {
 442     return _humongous_start_region;
 443   }
 444 

 445   // Makes the current region be a "starts humongous" region, i.e.,
 446   // the first region in a series of one or more contiguous regions
 447   // that will contain a single "humongous" object.

 448   //
 449   // obj_top : points to the end of the humongous object that's being
 450   // allocated.
 451   void set_starts_humongous(HeapWord* obj_top);
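
       // (Illustrative geometry, not from the source: for a 2.5M humongous
       // object with 1M regions, the first region's set_starts_humongous()
       // receives an obj_top lying 2.5M above its bottom(), i.e. within
       // the third region of the series.)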

 452 
 453   // Makes the current region be a "continues humongous"
 454   // region. first_hr is the "start humongous" region of the series
 455   // which this region will be part of.
 456   void set_continues_humongous(HeapRegion* first_hr);
 457 
 458   // Unsets the humongous-related fields on the region.
 459   void clear_humongous();
 460 
 461   // If the region has a remembered set, return a pointer to it.
 462   HeapRegionRemSet* rem_set() const {
 463     return _rem_set;
 464   }
 465 
 466   inline bool in_collection_set() const;
 467 
 468   inline HeapRegion* next_in_collection_set() const;
 469   inline void set_next_in_collection_set(HeapRegion* r);
 470 
 471   void set_allocation_context(AllocationContext_t context) {


 502   }
 503 
 504   HeapRegionSetBase* containing_set() { return _containing_set; }
 505 #else // ASSERT
 506   void set_containing_set(HeapRegionSetBase* containing_set) { }
 507 
 508   // containing_set() is only used in asserts so there's no reason
 509   // to provide a dummy version of it.
 510 #endif // ASSERT
 511 
 512   HeapRegion* get_next_young_region() { return _next_young_region; }
 513   void set_next_young_region(HeapRegion* hr) {
 514     _next_young_region = hr;
 515   }
 516 
 517   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
 518   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
 519   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
 520   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 521 



 522   // Reset the region's fields to their default values.
 523   void hr_clear(bool par, bool clear_space, bool locked = false);
 524   void par_clear();
 525 
 526   // Get the start of the unmarked area in this region.
 527   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
 528   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 529 
 530   // Note the start or end of marking. This tells the heap region
 531   // that the collector is about to start or has finished (concurrently)
 532   // marking the heap.
 533 
 534   // Notify the region that concurrent marking is starting. Initialize
 535   // all fields related to the next marking info.
 536   inline void note_start_of_marking();
 537 
 538   // Notify the region that concurrent marking has finished. Copy the
 539   // (now finalized) next marking info fields into the prev marking
 540   // info fields.
 541   inline void note_end_of_marking();


 547   // Notify the region that it ceases being to-space during a GC and
 548   // we will not copy objects into it any more.
 549   inline void note_end_of_copying(bool during_initial_mark);
 550 
 551   // Notify the region that we are about to start processing
 552   // self-forwarded objects during evac failure handling.
 553   void note_self_forwarding_removal_start(bool during_initial_mark,
 554                                           bool during_conc_mark);
 555 
 556   // Notify the region that we have finished processing self-forwarded
 557   // objects during evac failure handling.
 558   void note_self_forwarding_removal_end(bool during_initial_mark,
 559                                         bool during_conc_mark,
 560                                         size_t marked_bytes);
 561 
 562   // Returns "false" iff no object in the region had been allocated when the
 563   // last mark phase started.
 564   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 565 
 566   void reset_during_compaction() {



 567     zero_marked_bytes();
 568     init_top_at_mark_start();
 569   }
 570 
 571   void calc_gc_efficiency(void);
 572   double gc_efficiency() { return _gc_efficiency; }
 573 
 574   int  young_index_in_cset() const { return _young_index_in_cset; }
 575   void set_young_index_in_cset(int index) {
 576     assert( (index == -1) || is_young(), "pre-condition" );
 577     _young_index_in_cset = index;
 578   }
 579 
 580   int age_in_surv_rate_group() {
 581     assert( _surv_rate_group != NULL, "pre-condition" );
 582     assert( _age_index > -1, "pre-condition" );
 583     return _surv_rate_group->age_in_group(_age_index);
 584   }
 585 
 586   void record_surv_words_in_group(size_t words_survived) {

