
src/share/vm/gc/g1/heapRegion.hpp

rev 11265 : 8142749: HeapRegion::_predicted_bytes_to_copy is unused and can be removed
Reviewed-by: ?
Contributed-by: Vsevolod Tolstopyatov <qwwdfsad@gmail.com>
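
The excerpt below shows the file twice: first the pre-patch version (which still declares
_predicted_bytes_to_copy and its accessors), then the patched one. As a rough orientation
aid only (the types and numbers below are made up, not the real HeapRegion or G1 policy
code), the following self-contained sketch shows the record/read-back pattern the surviving
cached attributes follow; a field that only ever appears on the write side, as
_predicted_bytes_to_copy did, drops out of this bookkeeping without any other change.

    #include <cstddef>
    #include <cstdio>

    // Toy stand-in for the cached collection-set attributes; illustrative
    // assumptions only, not HotSpot code.
    struct ToyRegion {
      size_t recorded_rs_length;        // written by the policy, read back later
      double predicted_elapsed_time_ms; // written by the policy, read back later
      // size_t predicted_bytes_to_copy; // written but never read, so removable
    };

    int main() {
      ToyRegion r = { 128, 2.5 };
      size_t total_rs_length = 1024;    // running totals kept by the policy
      double total_time_ms   = 10.0;
      // When a region leaves the collection set, the cached values are
      // subtracted from the running totals again. A field that never shows
      // up on this read side is dead weight.
      total_rs_length -= r.recorded_rs_length;
      total_time_ms   -= r.predicted_elapsed_time_ms;
      printf("%zu %.1f\n", total_rs_length, total_time_ms);
      return 0;
    }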


 299 
 300   void init_top_at_mark_start() {
 301     assert(_prev_marked_bytes == 0 &&
 302            _next_marked_bytes == 0,
 303            "Must be called after zero_marked_bytes.");
 304     HeapWord* bot = bottom();
 305     _prev_top_at_mark_start = bot;
 306     _next_top_at_mark_start = bot;
 307   }
 308 
 309   // Cached attributes used in the collection set policy information
 310 
 311   // The RSet length that was added to the total value
 312   // for the collection set.
 313   size_t _recorded_rs_length;
 314 
 315   // The predicted elapsed time that was added to the total value
 316   // for the collection set.
 317   double _predicted_elapsed_time_ms;
 318 
 319   // The predicted number of bytes to copy that was added to
 320   // the total value for the collection set.
 321   size_t _predicted_bytes_to_copy;
 322 
 323  public:
 324   HeapRegion(uint hrm_index,
 325              G1BlockOffsetTable* bot,
 326              MemRegion mr);
 327 
 328   // Initializing the HeapRegion not only resets the data structure, but also
 329   // resets the BOT for that heap region.
 330   // The default value for clear_space means that we do the clearing
 331   // ourselves when there is clearing to be done. We also always mangle the space.
 332   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 333 
 334   static int    LogOfHRGrainBytes;
 335   static int    LogOfHRGrainWords;
 336 
 337   static size_t GrainBytes;
 338   static size_t GrainWords;
 339   static size_t CardsPerRegion;
 340 
 341   static size_t align_up_to_region_byte_size(size_t sz) {
 342     return (sz + (size_t) GrainBytes - 1) &
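
The hunk above is cut off in the middle of align_up_to_region_byte_size(), so the rest of
the expression is not shown in this excerpt. For context, here is a self-contained sketch
of the usual power-of-two round-up idiom such a helper is built on; the 1 MB grain is just
an assumed example value, not the GrainBytes of any particular G1 configuration.

    #include <cstddef>
    #include <cstdio>

    // Round sz up to the next multiple of grain, where grain is a power of
    // two: adding grain - 1 and masking off the low bits leaves already
    // aligned sizes unchanged and bumps everything else up.
    static size_t align_up(size_t sz, size_t grain) {
      return (sz + grain - 1) & ~(grain - 1);
    }

    int main() {
      const size_t grain = 1024 * 1024;             // assumed example region size
      printf("%zu\n", align_up(1, grain));          // 1048576
      printf("%zu\n", align_up(grain, grain));      // 1048576 (already aligned)
      printf("%zu\n", align_up(grain + 1, grain));  // 2097152
      return 0;
    }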


 540   // Notify the region that concurrent marking has finished. Copy the
 541   // (now finalized) next marking info fields into the prev marking
 542   // info fields.
 543   inline void note_end_of_marking();
 544 
 545   // Notify the region that it will be used as to-space during a GC
 546   // and we are about to start copying objects into it.
 547   inline void note_start_of_copying(bool during_initial_mark);
 548 
 549   // Notify the region that it ceases being to-space during a GC and
 550   // we will not copy objects into it any more.
 551   inline void note_end_of_copying(bool during_initial_mark);
 552 
 553   // Notify the region that we are about to start processing
 554   // self-forwarded objects during evac failure handling.
 555   void note_self_forwarding_removal_start(bool during_initial_mark,
 556                                           bool during_conc_mark);
 557 
 558   // Notify the region that we have finished processing self-forwarded
 559   // objects during evac failure handling.
 560   void note_self_forwarding_removal_end(bool during_initial_mark,
 561                                         bool during_conc_mark,
 562                                         size_t marked_bytes);
 563 
 564   // Returns "false" iff no object in the region was allocated when the
 565   // last mark phase ended.
 566   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 567 
 568   void reset_during_compaction() {
 569     assert(is_humongous(),
 570            "should only be called for humongous regions");
 571 
 572     zero_marked_bytes();
 573     init_top_at_mark_start();
 574   }
 575 
 576   void calc_gc_efficiency(void);
 577   double gc_efficiency() { return _gc_efficiency;}
 578 
 579   int  young_index_in_cset() const { return _young_index_in_cset; }
 580   void set_young_index_in_cset(int index) {
 581     assert( (index == -1) || is_young(), "pre-condition" );
 582     _young_index_in_cset = index;


 642   // is within the unmarked area of the region.
 643   bool obj_allocated_since_prev_marking(oop obj) const {
 644     return (HeapWord *) obj >= prev_top_at_mark_start();
 645   }
 646   bool obj_allocated_since_next_marking(oop obj) const {
 647     return (HeapWord *) obj >= next_top_at_mark_start();
 648   }
 649 
 650   // Returns the "evacuation_failed" property of the region.
 651   bool evacuation_failed() { return _evacuation_failed; }
 652 
 653   // Sets the "evacuation_failed" property of the region.
 654   void set_evacuation_failed(bool b) {
 655     _evacuation_failed = b;
 656 
 657     if (b) {
 658       _next_marked_bytes = 0;
 659     }
 660   }
 661 
 662   // Requires that "mr" be entirely within the region.
 663   // Apply "cl->do_object" to all objects that intersect with "mr".
 664   // If the iteration encounters an unparseable portion of the region,
 665   // or if "cl->abort()" is true after a closure application,
 666   // terminate the iteration and return the address of the start of the
 667   // subregion that isn't done.  (The two can be distinguished by querying
 668   // "cl->abort()".)  Return of "NULL" indicates that the iteration
 669   // completed.
 670   HeapWord*
 671   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 672 
 673   // filter_young: if true and the region is a young region then we
 674   // skip the iteration.
 675   // card_ptr: if not NULL, and we decide that the card is not young
 676   // and we iterate over it, we'll clean the card before we start the
 677   // iteration.
 678   HeapWord*
 679   oops_on_card_seq_iterate_careful(MemRegion mr,
 680                                    FilterOutOfRegionClosure* cl,
 681                                    bool filter_young,
 682                                    jbyte* card_ptr);
 683 
 684   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 685   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 686   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 687 
 688   void set_recorded_rs_length(size_t rs_length) {
 689     _recorded_rs_length = rs_length;
 690   }
 691 
 692   void set_predicted_elapsed_time_ms(double ms) {
 693     _predicted_elapsed_time_ms = ms;
 694   }
 695 
 696   void set_predicted_bytes_to_copy(size_t bytes) {
 697     _predicted_bytes_to_copy = bytes;
 698   }
 699 
 700   virtual CompactibleSpace* next_compaction_space() const;
 701 
 702   virtual void reset_after_compaction();
 703 
 704   // Routines for managing a list of code roots (attached to
 705   // this region's RSet) that point into this heap region.
 706   void add_strong_code_root(nmethod* nm);
 707   void add_strong_code_root_locked(nmethod* nm);
 708   void remove_strong_code_root(nmethod* nm);
 709 
 710   // Applies blk->do_code_blob() to each of the entries in
 711   // the strong code roots list for this region.
 712   void strong_code_roots_do(CodeBlobClosure* blk) const;
 713 
 714   // Verify that the entries on the strong code root list for this
 715   // region are live and include at least one pointer into this region.
 716   void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
 717 
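
The obj_allocated_since_prev_marking() / obj_allocated_since_next_marking() checks above
compare an object's address against the region's top-at-mark-start (TAMS) pointers. A
minimal, self-contained model of that comparison (the struct below is a toy, not the real
HeapRegion):

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t HeapWordToy;   // toy stand-in for HeapWord

    // Toy region layout: bottom <= prev_tams <= top. Anything allocated at or
    // above the previous top-at-mark-start was allocated after the last
    // completed marking and is treated as implicitly live.
    struct ToyRegionTAMS {
      HeapWordToy* bottom;
      HeapWordToy* prev_tams;   // _prev_top_at_mark_start
      HeapWordToy* top;

      bool allocated_since_prev_marking(const HeapWordToy* obj) const {
        return obj >= prev_tams;   // mirrors the >= prev_top_at_mark_start() check
      }
    };

    int main() {
      HeapWordToy storage[16];
      ToyRegionTAMS r = { storage, storage + 8, storage + 12 };
      assert(!r.allocated_since_prev_marking(storage + 4));   // below TAMS
      assert(r.allocated_since_prev_marking(storage + 10));   // at/above TAMS
      return 0;
    }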




 299 
 300   void init_top_at_mark_start() {
 301     assert(_prev_marked_bytes == 0 &&
 302            _next_marked_bytes == 0,
 303            "Must be called after zero_marked_bytes.");
 304     HeapWord* bot = bottom();
 305     _prev_top_at_mark_start = bot;
 306     _next_top_at_mark_start = bot;
 307   }
 308 
 309   // Cached attributes used in the collection set policy information
 310 
 311   // The RSet length that was added to the total value
 312   // for the collection set.
 313   size_t _recorded_rs_length;
 314 
 315   // The predicted elapsed time that was added to the total value
 316   // for the collection set.
 317   double _predicted_elapsed_time_ms;
 318 




 319  public:
 320   HeapRegion(uint hrm_index,
 321              G1BlockOffsetTable* bot,
 322              MemRegion mr);
 323 
 324   // Initializing the HeapRegion not only resets the data structure, but also
 325   // resets the BOT for that heap region.
 326   // The default value for clear_space means that we do the clearing
 327   // ourselves when there is clearing to be done. We also always mangle the space.
 328   virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
 329 
 330   static int    LogOfHRGrainBytes;
 331   static int    LogOfHRGrainWords;
 332 
 333   static size_t GrainBytes;
 334   static size_t GrainWords;
 335   static size_t CardsPerRegion;
 336 
 337   static size_t align_up_to_region_byte_size(size_t sz) {
 338     return (sz + (size_t) GrainBytes - 1) &


 536   // Notify the region that concurrent marking has finished. Copy the
 537   // (now finalized) next marking info fields into the prev marking
 538   // info fields.
 539   inline void note_end_of_marking();
 540 
 541   // Notify the region that it will be used as to-space during a GC
 542   // and we are about to start copying objects into it.
 543   inline void note_start_of_copying(bool during_initial_mark);
 544 
 545   // Notify the region that it ceases being to-space during a GC and
 546   // we will not copy objects into it any more.
 547   inline void note_end_of_copying(bool during_initial_mark);
 548 
 549   // Notify the region that we are about to start processing
 550   // self-forwarded objects during evac failure handling.
 551   void note_self_forwarding_removal_start(bool during_initial_mark,
 552                                           bool during_conc_mark);
 553 
 554   // Notify the region that we have finished processing self-forwarded
 555   // objects during evac failure handling.
 556   void note_self_forwarding_removal_end(size_t marked_bytes);


 557 
 558   // Returns "false" iff no object in the region was allocated when the
 559   // last mark phase ended.
 560   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 561 
 562   void reset_during_compaction() {
 563     assert(is_humongous(),
 564            "should only be called for humongous regions");
 565 
 566     zero_marked_bytes();
 567     init_top_at_mark_start();
 568   }
 569 
 570   void calc_gc_efficiency(void);
 571   double gc_efficiency() { return _gc_efficiency;}
 572 
 573   int  young_index_in_cset() const { return _young_index_in_cset; }
 574   void set_young_index_in_cset(int index) {
 575     assert( (index == -1) || is_young(), "pre-condition" );
 576     _young_index_in_cset = index;


 636   // is within the unmarked area of the region.
 637   bool obj_allocated_since_prev_marking(oop obj) const {
 638     return (HeapWord *) obj >= prev_top_at_mark_start();
 639   }
 640   bool obj_allocated_since_next_marking(oop obj) const {
 641     return (HeapWord *) obj >= next_top_at_mark_start();
 642   }
 643 
 644   // Returns the "evacuation_failed" property of the region.
 645   bool evacuation_failed() { return _evacuation_failed; }
 646 
 647   // Sets the "evacuation_failed" property of the region.
 648   void set_evacuation_failed(bool b) {
 649     _evacuation_failed = b;
 650 
 651     if (b) {
 652       _next_marked_bytes = 0;
 653     }
 654   }
 655 











 656   // filter_young: if true and the region is a young region then we
 657   // skip the iteration.
 658   // card_ptr: if not NULL, and we decide that the card is not young
 659   // and we iterate over it, we'll clean the card before we start the
 660   // iteration.
 661   HeapWord*
 662   oops_on_card_seq_iterate_careful(MemRegion mr,
 663                                    FilterOutOfRegionClosure* cl,
 664                                    bool filter_young,
 665                                    jbyte* card_ptr);
 666 
 667   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 668   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }

 669 
 670   void set_recorded_rs_length(size_t rs_length) {
 671     _recorded_rs_length = rs_length;
 672   }
 673 
 674   void set_predicted_elapsed_time_ms(double ms) {
 675     _predicted_elapsed_time_ms = ms;




 676   }
 677 
 678   virtual CompactibleSpace* next_compaction_space() const;
 679 
 680   virtual void reset_after_compaction();
 681 
 682   // Routines for managing a list of code roots (attached to
 683   // this region's RSet) that point into this heap region.
 684   void add_strong_code_root(nmethod* nm);
 685   void add_strong_code_root_locked(nmethod* nm);
 686   void remove_strong_code_root(nmethod* nm);
 687 
 688   // Applies blk->do_code_blob() to each of the entries in
 689   // the strong code roots list for this region.
 690   void strong_code_roots_do(CodeBlobClosure* blk) const;
 691 
 692   // Verify that the entries on the strong code root list for this
 693   // region are live and include at least one pointer into this region.
 694   void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
 695 
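
The strong code roots routines declared above (add_strong_code_root(),
remove_strong_code_root(), strong_code_roots_do()) follow HotSpot's usual closure-iteration
shape: entries are registered per region and later visited by handing in a closure whose
callback is applied to each entry. A self-contained toy of that shape, using plain C++
types in place of nmethod and CodeBlobClosure:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy closure interface mirroring the do_code_blob()-style callback.
    struct ToyBlobClosure {
      virtual void do_blob(int blob_id) = 0;
      virtual ~ToyBlobClosure() {}
    };

    // Toy per-region list of "code roots"; illustrative only.
    struct ToyRootsList {
      std::vector<int> roots;

      void add(int blob_id)    { roots.push_back(blob_id); }
      void remove(int blob_id) {
        roots.erase(std::remove(roots.begin(), roots.end(), blob_id), roots.end());
      }
      // Apply the closure to every registered entry, as strong_code_roots_do()
      // does with a CodeBlobClosure.
      void roots_do(ToyBlobClosure* blk) const {
        for (size_t i = 0; i < roots.size(); i++) {
          blk->do_blob(roots[i]);
        }
      }
    };

    struct PrintClosure : public ToyBlobClosure {
      virtual void do_blob(int blob_id) { printf("blob %d\n", blob_id); }
    };

    int main() {
      ToyRootsList list;
      list.add(1);
      list.add(2);
      list.remove(1);
      PrintClosure cl;
      list.roots_do(&cl);   // prints: blob 2
      return 0;
    }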

