    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
  }

  // Data for young region survivor prediction.
  uint _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // Cached attributes used by the collection set policy.

  // The calculated GC efficiency of the region.
  double _gc_efficiency;

  // The remembered set length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  uint _node_index;

  void report_region_type_change(G1HeapRegionTraceType::Type to);

  // Returns whether the given object address refers to a dead object; "size" is
  // set to either the size of the object (if live) or the size of the block
  // (if dead).
  // May
  // - only be called with obj < top()
  // - not be called on humongous objects or archive regions
  inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
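  //
  // A minimal in-class usage sketch (illustrative only, not part of this class):
  // walking the used part of a region block by block. The locals "prev_bitmap"
  // and "cl" are assumptions for the example.
  //
  //   HeapWord* cur = bottom();
  //   size_t size = 0;
  //   while (cur < top()) {
  //     oop obj = cast_to_oop(cur);
  //     if (is_obj_dead_with_size(obj, prev_bitmap, &size)) {
  //       // Dead: "size" is the size of the whole dead block; just skip it.
  //     } else {
  //       // Live: "size" is the object size; visit its references.
  //       obj->oop_iterate(cl);
  //     }
  //     cur += size;
  //   }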

  // Iterate over the references covered by the given MemRegion in a humongous
  // object and apply the given closure to them.
  // Humongous objects are allocated directly in the old gen, so we need special
  // handling for concurrent processing encountering an in-progress allocation.
  // Returns the address after the last word actually scanned, or NULL if the
  // area could not be scanned (that should only happen when invoked concurrently
  // with the mutator).
  template <class Closure, bool is_gc_active>

  // ...

  // Determine whether the given object was allocated after the previous/next
  // marking started, i.e. whether its address is above the corresponding
  // top-at-mark-start and therefore is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }
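  //
  // A minimal sketch (illustrative only) of how these predicates are typically
  // combined with the prev mark bitmap to decide liveness: an object is treated
  // as live if it was allocated since the previous marking started, or if it is
  // marked on the prev bitmap. The name "prev_bitmap" is an assumption for the
  // example.
  //
  //   bool is_live(oop obj, const G1CMBitMap* prev_bitmap) const {
  //     return obj_allocated_since_prev_marking(obj) || prev_bitmap->is_marked(obj);
  //   }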

  // Iterate over the objects overlapping the given memory region, applying cl
  // to all references in the region. This is a helper for
  // G1RemSet::refine_card*, and is tightly coupled with them.
  // mr must not be empty, and must be trimmed to the allocated/parseable space
  // in this region. This region must be old or humongous.
  // Returns the next unscanned address if the designated objects were successfully
  // processed, or NULL if an unparseable part of the heap was encountered (that
  // should only happen when invoked concurrently with the mutator).
  template <bool is_gc_active, class Closure>
  inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
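  //
  // A minimal caller-side sketch (illustrative only) of how a card refinement
  // step could use this: scan the card's MemRegion and treat a NULL result as
  // "retry later" because the mutator was allocating concurrently. The names
  // "r", "card_region" and "cl" are assumptions for the example.
  //
  //   HeapWord* scanned_to = r->oops_on_memregion_seq_iterate_careful<false>(card_region, &cl);
  //   if (scanned_to == NULL) {
  //     // Hit an unparseable area while running concurrently with the mutator;
  //     // defer this card and refine it again later.
  //   }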

  size_t recorded_rs_length() const { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }
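  //
  // A minimal sketch (illustrative only) of how a collection set policy could
  // cache its per-region cost inputs here when adding a region. The names "r"
  // and "predicted_time_ms" are assumptions for the example; the predicted time
  // would come from the policy's cost model.
  //
  //   size_t rs_length = r->rem_set()->occupied();
  //   r->set_recorded_rs_length(rs_length);
  //   r->set_predicted_elapsed_time_ms(predicted_time_ms);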

  // Routines for managing a list of code roots (attached to this region's
  // RSet) that point into this heap region.
  void add_strong_code_root(nmethod* nm);
  void add_strong_code_root_locked(nmethod* nm);
  void remove_strong_code_root(nmethod* nm);

  // Applies blk->do_code_blob() to each of the entries in
  // the strong code roots list for this region.
  void strong_code_roots_do(CodeBlobClosure* blk) const;
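  //
  // A minimal sketch (illustrative only) of a closure that could be passed to
  // strong_code_roots_do, e.g. to count the nmethods rooted in this region.
  // The class name "CountCodeRootsClosure" is an assumption for the example.
  //
  //   class CountCodeRootsClosure : public CodeBlobClosure {
  //     size_t _count;
  //   public:
  //     CountCodeRootsClosure() : _count(0) { }
  //     void do_code_blob(CodeBlob* cb) {
  //       if (cb->as_nmethod_or_null() != NULL) {
  //         _count++;
  //       }
  //     }
  //     size_t count() const { return _count; }
  //   };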

  uint node_index() const { return _node_index; }
  void set_node_index(uint node_index) { _node_index = node_index; }

  // Verify that the entries on the strong code root list for this
  // region are live and include at least one pointer into this region.
  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;

  void print() const;