< prev index next >

src/hotspot/share/gc/g1/heapRegion.hpp

Print this page
rev 57067 : imported patch 8233588-cleanup-survrategroup
rev 57068 : imported patch 8233588-kbarrett-review
rev 57069 : imported patch 8231579-incremental-calculation-wrong
rev 57072 : imported patch 8234179-move-heapregion-inc-cset-stats-into-g1collectionset


 237   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
 238   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 239 
       // Resets both top-at-mark-start pointers to bottom(). The marked-byte
       // counters must already have been cleared (see the assert below).
 240   void init_top_at_mark_start() {
 241     assert(_prev_marked_bytes == 0 &&
 242            _next_marked_bytes == 0,
 243            "Must be called after zero_marked_bytes.");
 244     _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
 245   }
 246 
 247   // Data for young region survivor prediction.
 248   uint  _young_index_in_cset;
 249   SurvRateGroup* _surv_rate_group;
 250   int  _age_index;              // Index within _surv_rate_group -- presumably an age bucket; verify in SurvRateGroup.
 251 
 252   // Cached attributes used in the collection set policy information
 253 
 254   // The calculated GC efficiency of the region.
 255   double _gc_efficiency;
 256 
 257   // The remembered set length that was added to the total value
 258   // for the collection set.
 259   size_t _recorded_rs_length;
 260 
 261   // The predicted time without copy time that was added to total value
 262   // for the collection set.
 263   double _predicted_non_copy_time_ms;
 264 
       // NOTE(review): looks like the NUMA node index for this region's memory -- confirm against G1NUMA.
 265   uint _node_index;
 266 
 267   void report_region_type_change(G1HeapRegionTraceType::Type to);
 268 
 269   // Returns whether the given object address refers to a dead object, and either the
 270   // size of the object (if live) or the size of the block (if dead) in *size.
 271   // May
 272   // - only be called with obj < top()
 273   // - not be called on humongous objects or archive regions
 274   inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
 275 
 276   // Iterate over the references covered by the given MemRegion in a humongous
 277   // object and apply the given closure to them.
 278   // Humongous objects are allocated directly in the old-gen. So we need special
 279   // handling for concurrent processing encountering an in-progress allocation.
 280   // Returns the address after the last actually scanned or NULL if the area could
 281   // not be scanned (That should only happen when invoked concurrently with the
 282   // mutator).
 283   template <class Closure, bool is_gc_active>
 284   inline HeapWord* do_oops_on_memregion_in_humongous(MemRegion mr,


 560   // Determine if an object has been allocated since the last
 561   // mark performed by the collector. This returns true iff the object
 562   // is within the unmarked area of the region.
 563   bool obj_allocated_since_prev_marking(oop obj) const {
 564     return (HeapWord *) obj >= prev_top_at_mark_start();
 565   }
       // Same test, but relative to the in-progress (next) marking.
 566   bool obj_allocated_since_next_marking(oop obj) const {
 567     return (HeapWord *) obj >= next_top_at_mark_start();
 568   }
 569 
 570   // Iterate over the objects overlapping the given memory region, applying cl
 571   // to all references in the region.  This is a helper for
 572   // G1RemSet::refine_card*, and is tightly coupled with them.
 573   // mr must not be empty. Must be trimmed to the allocated/parseable space in this region.
 574   // This region must be old or humongous.
 575   // Returns the next unscanned address if the designated objects were successfully
 576   // processed, NULL if an unparseable part of the heap was encountered (That should
 577   // only happen when invoked concurrently with the mutator).
 578   template <bool is_gc_active, class Closure>
 579   inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
 580 
       // Accessors for the collection set statistics recorded for this region.
 581   size_t recorded_rs_length() const         { return _recorded_rs_length; }
 582   double predicted_non_copy_time_ms() const { return _predicted_non_copy_time_ms; }
 583 
 584   void set_recorded_rs_length(size_t rs_length) {
 585     _recorded_rs_length = rs_length;
 586   }
 587 
 588   void set_predicted_non_copy_time_ms(double ms) {
 589     _predicted_non_copy_time_ms = ms;
 590   }
 591 
 592   // Routines for managing a list of code roots (attached to
 593   // this region's RSet) that point into this heap region.
 594   void add_strong_code_root(nmethod* nm);
 595   void add_strong_code_root_locked(nmethod* nm);
 596   void remove_strong_code_root(nmethod* nm);
 597 
 598   // Applies blk->do_code_blob() to each of the entries in
 599   // the strong code roots list for this region.
 600   void strong_code_roots_do(CodeBlobClosure* blk) const;
 601 
 602   uint node_index() const { return _node_index; }
 603   void set_node_index(uint node_index) { _node_index = node_index; }
 604 
 605   // Verify that the entries on the strong code root list for this
 606   // region are live and include at least one pointer into this region.
 607   void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
 608 
 609   void print() const;
 610   void print_on(outputStream* st) const;




 237   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
 238   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 239 
       // Resets both top-at-mark-start pointers to bottom(). The marked-byte
       // counters must already have been cleared (see the assert below).
 240   void init_top_at_mark_start() {
 241     assert(_prev_marked_bytes == 0 &&
 242            _next_marked_bytes == 0,
 243            "Must be called after zero_marked_bytes.");
 244     _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
 245   }
 246 
 247   // Data for young region survivor prediction.
 248   uint  _young_index_in_cset;
 249   SurvRateGroup* _surv_rate_group;
 250   int  _age_index;              // Index within _surv_rate_group -- presumably an age bucket; verify in SurvRateGroup.
 251 
 252   // Cached attributes used in the collection set policy information
 253 
 254   // The calculated GC efficiency of the region.
 255   double _gc_efficiency;
 256 








       // NOTE(review): looks like the NUMA node index for this region's memory -- confirm against G1NUMA.
 257   uint _node_index;
 258 
 259   void report_region_type_change(G1HeapRegionTraceType::Type to);
 260 
 261   // Returns whether the given object address refers to a dead object, and either the
 262   // size of the object (if live) or the size of the block (if dead) in *size.
 263   // May
 264   // - only be called with obj < top()
 265   // - not be called on humongous objects or archive regions
 266   inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
 267 
 268   // Iterate over the references covered by the given MemRegion in a humongous
 269   // object and apply the given closure to them.
 270   // Humongous objects are allocated directly in the old-gen. So we need special
 271   // handling for concurrent processing encountering an in-progress allocation.
 272   // Returns the address after the last actually scanned or NULL if the area could
 273   // not be scanned (That should only happen when invoked concurrently with the
 274   // mutator).
 275   template <class Closure, bool is_gc_active>
 276   inline HeapWord* do_oops_on_memregion_in_humongous(MemRegion mr,


 552   // Determine if an object has been allocated since the last
 553   // mark performed by the collector. This returns true iff the object
 554   // is within the unmarked area of the region.
 555   bool obj_allocated_since_prev_marking(oop obj) const {
 556     return (HeapWord *) obj >= prev_top_at_mark_start();
 557   }
       // Same test, but relative to the in-progress (next) marking.
 558   bool obj_allocated_since_next_marking(oop obj) const {
 559     return (HeapWord *) obj >= next_top_at_mark_start();
 560   }
 561 
 562   // Iterate over the objects overlapping the given memory region, applying cl
 563   // to all references in the region.  This is a helper for
 564   // G1RemSet::refine_card*, and is tightly coupled with them.
 565   // mr must not be empty. Must be trimmed to the allocated/parseable space in this region.
 566   // This region must be old or humongous.
 567   // Returns the next unscanned address if the designated objects were successfully
 568   // processed, NULL if an unparseable part of the heap was encountered (That should
 569   // only happen when invoked concurrently with the mutator).
 570   template <bool is_gc_active, class Closure>
 571   inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);











 572 
 573   // Routines for managing a list of code roots (attached to
 574   // this region's RSet) that point into this heap region.
 575   void add_strong_code_root(nmethod* nm);
 576   void add_strong_code_root_locked(nmethod* nm);
 577   void remove_strong_code_root(nmethod* nm);
 578 
 579   // Applies blk->do_code_blob() to each of the entries in
 580   // the strong code roots list for this region.
 581   void strong_code_roots_do(CodeBlobClosure* blk) const;
 582 
 583   uint node_index() const { return _node_index; }
 584   void set_node_index(uint node_index) { _node_index = node_index; }
 585 
 586   // Verify that the entries on the strong code root list for this
 587   // region are live and include at least one pointer into this region.
 588   void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
 589 
 590   void print() const;
 591   void print_on(outputStream* st) const;


< prev index next >