229 // have been allocated in this part since the last mark phase.
230 // "prev" is the top at the start of the last completed marking.
231 // "next" is the top at the start of the in-progress marking (if any.)
232 HeapWord* _prev_top_at_mark_start;    // "Prev" TAMS (top-at-mark-start).
233 HeapWord* _next_top_at_mark_start;    // "Next" TAMS (top-at-mark-start).
234
235 // We use concurrent marking to determine the amount of live data
236 // in each heap region.
237 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
238 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
239
240 void init_top_at_mark_start() {
241   // Fresh region state: no marking yet, so both TAMS pointers start at bottom().
242   assert(_prev_marked_bytes == 0 && _next_marked_bytes == 0, "Must be called after zero_marked_bytes.");
243   _next_top_at_mark_start = bottom();
244   _prev_top_at_mark_start = _next_top_at_mark_start;
245 }
246
247 // Data for young region survivor prediction.
248 uint _young_index_in_cset;       // Position in the collection set; 0 when cleared (see clear_young_index_in_cset()).
249 SurvRateGroup* _surv_rate_group; // Survival-rate statistics group, if installed.
250 int _age_index;                  // Index of this region within _surv_rate_group -- presumably its age slot; confirm.
251
252 // Cached attributes used in the collection set policy information.
253
254 // The calculated GC efficiency of the region.
255 double _gc_efficiency;
256
257 uint _node_index;                // NOTE(review): looks like a NUMA node index -- confirm against users.
258
259 void report_region_type_change(G1HeapRegionTraceType::Type to);  // Emit a region-type-change trace event -- TODO confirm.
260
261 // Returns whether the given object address refers to a dead object, and either the
262 // size of the object (if live) or the size of the block (if dead) in size.
263 // Preconditions:
264 // - only called with obj < top()
265 // - not called on humongous objects or archive regions
266 inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
267
268 // Iterate over the references covered by the given MemRegion in a humongous
269 // object and apply the given closure to them.
527
528 void calc_gc_efficiency(void);  // Recompute the cached _gc_efficiency -- presumably from live bytes and predicted cost; confirm.
529 double gc_efficiency() const { return _gc_efficiency;}
530
531 uint young_index_in_cset() const { return _young_index_in_cset; }  // 0 means no index is currently set.
532 void clear_young_index_in_cset() { _young_index_in_cset = 0; }
533 void set_young_index_in_cset(uint index) {
534   // Only young regions may be given a slot; 0 and UINT_MAX act as sentinels.
535   assert(index != UINT_MAX && index != 0, "just checking");
536   assert(is_young(), "pre-condition");
537   _young_index_in_cset = index;
538 }
539
540 int age_in_surv_rate_group() const;   // Age slot within the installed surv-rate group (see _age_index).
541 bool has_valid_age_in_surv_rate() const;
542
543 bool has_surv_rate_group() const;     // Whether a surv-rate group is currently installed.
544
545 double surv_rate_prediction(G1Predictions const& predictor) const;  // Predicted survival rate for this region.
546
547 void install_surv_rate_group(SurvRateGroup* surv_rate_group);   // Attach this region to a surv-rate group.
548 void uninstall_surv_rate_group();                               // Detach; counterpart of install_surv_rate_group().
549
550 void record_surv_words_in_group(size_t words_survived);  // Report surviving words to the installed group.
551
552 // An object has been allocated since the last completed ("prev") marking
553 // iff its address is at or above the prev top-at-mark-start, i.e. it lies
554 // in the area that marking never examined.
555 bool obj_allocated_since_prev_marking(oop obj) const {
556   return prev_top_at_mark_start() <= (HeapWord *) obj;
557 }
558 bool obj_allocated_since_next_marking(oop obj) const {
559   return next_top_at_mark_start() <= (HeapWord *) obj;  // symmetric to the "prev" variant above
560 }
561
562 // Iterate over the objects overlapping the given memory region, applying cl
563 // to all references in the region. This is a helper for
564 // G1RemSet::refine_card*, and is tightly coupled with them.
565 // mr must not be empty. Must be trimmed to the allocated/parseable space in this region.
566 // This region must be old or humongous.
567 // Returns the next unscanned address if the designated objects were successfully
|
229 // have been allocated in this part since the last mark phase.
230 // "prev" is the top at the start of the last completed marking.
231 // "next" is the top at the start of the in-progress marking (if any.)
232 HeapWord* _prev_top_at_mark_start;    // "Prev" TAMS (top-at-mark-start).
233 HeapWord* _next_top_at_mark_start;    // "Next" TAMS (top-at-mark-start).
234
235 // We use concurrent marking to determine the amount of live data
236 // in each heap region.
237 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
238 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
239
240 void init_top_at_mark_start() {
241   // Fresh region state: no marking yet, so both TAMS pointers start at bottom().
242   assert(_prev_marked_bytes == 0 && _next_marked_bytes == 0, "Must be called after zero_marked_bytes.");
243   _next_top_at_mark_start = bottom();
244   _prev_top_at_mark_start = _next_top_at_mark_start;
245 }
246
247 // Data for young region survivor prediction.
248 uint _young_index_in_cset;         // Position in the collection set; 0 when cleared (see clear_young_index_in_cset()).
249 G1SurvRateGroup* _surv_rate_group; // Survival-rate statistics group, if installed.
250 int _age_index;                    // Index of this region within _surv_rate_group -- presumably its age slot; confirm.
251
252 // Cached attributes used in the collection set policy information.
253
254 // The calculated GC efficiency of the region.
255 double _gc_efficiency;
256
257 uint _node_index;                  // NOTE(review): looks like a NUMA node index -- confirm against users.
258
259 void report_region_type_change(G1HeapRegionTraceType::Type to);  // Emit a region-type-change trace event -- TODO confirm.
260
261 // Returns whether the given object address refers to a dead object, and either the
262 // size of the object (if live) or the size of the block (if dead) in size.
263 // Preconditions:
264 // - only called with obj < top()
265 // - not called on humongous objects or archive regions
266 inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
267
268 // Iterate over the references covered by the given MemRegion in a humongous
269 // object and apply the given closure to them.
527
528 void calc_gc_efficiency(void);  // Recompute the cached _gc_efficiency -- presumably from live bytes and predicted cost; confirm.
529 double gc_efficiency() const { return _gc_efficiency;}
530
531 uint young_index_in_cset() const { return _young_index_in_cset; }  // 0 means no index is currently set.
532 void clear_young_index_in_cset() { _young_index_in_cset = 0; }
533 void set_young_index_in_cset(uint index) {
534   // Only young regions may be given a slot; 0 and UINT_MAX act as sentinels.
535   assert(index != UINT_MAX && index != 0, "just checking");
536   assert(is_young(), "pre-condition");
537   _young_index_in_cset = index;
538 }
539
540 int age_in_surv_rate_group() const;   // Age slot within the installed surv-rate group (see _age_index).
541 bool has_valid_age_in_surv_rate() const;
542
543 bool has_surv_rate_group() const;     // Whether a surv-rate group is currently installed.
544
545 double surv_rate_prediction(G1Predictions const& predictor) const;  // Predicted survival rate for this region.
546
547 void install_surv_rate_group(G1SurvRateGroup* surv_rate_group);  // Attach this region to a surv-rate group.
548 void uninstall_surv_rate_group();                                // Detach; counterpart of install_surv_rate_group().
549
550 void record_surv_words_in_group(size_t words_survived);  // Report surviving words to the installed group.
551
552 // An object has been allocated since the last completed ("prev") marking
553 // iff its address is at or above the prev top-at-mark-start, i.e. it lies
554 // in the area that marking never examined.
555 bool obj_allocated_since_prev_marking(oop obj) const {
556   return prev_top_at_mark_start() <= (HeapWord *) obj;
557 }
558 bool obj_allocated_since_next_marking(oop obj) const {
559   return next_top_at_mark_start() <= (HeapWord *) obj;  // symmetric to the "prev" variant above
560 }
561
562 // Iterate over the objects overlapping the given memory region, applying cl
563 // to all references in the region. This is a helper for
564 // G1RemSet::refine_card*, and is tightly coupled with them.
565 // mr must not be empty. Must be trimmed to the allocated/parseable space in this region.
566 // This region must be old or humongous.
567 // Returns the next unscanned address if the designated objects were successfully
|