18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
27
28 #include "gc_implementation/g1/g1AllocationContext.hpp"
29 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
30 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
31 #include "gc_implementation/g1/heapRegionType.hpp"
32 #include "gc_implementation/g1/survRateGroup.hpp"
33 #include "gc_implementation/shared/ageTable.hpp"
34 #include "gc_implementation/shared/spaceDecorator.hpp"
35 #include "memory/space.inline.hpp"
36 #include "memory/watermark.hpp"
37 #include "utilities/macros.hpp"
38
39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
40 // can be collected independently.
41
42 // NOTE: Although a HeapRegion is a Space, its
43 // Space::initDirtyCardClosure method must not be called.
44 // The problem is that the existence of this method breaks
45 // the independence of barrier sets from remembered sets.
46 // The solution is to remove this method from the definition
47 // of a Space.
48
49 class HeapRegionRemSet;
50 class HeapRegionRemSetIterator;
51 class HeapRegion;
52 class HeapRegionSetBase;
53 class nmethod;
54
55 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
56 #define HR_FORMAT_PARAMS(_hr_) \
57 (_hr_)->hrm_index(), \
// Reset this space's block offset table (delegates to _offsets).
void reset_bot() {
  _offsets.reset_bot();
}

// Print the block offset table to the given stream, for debugging.
void print_bot_on(outputStream* out) {
  _offsets.print_on(out);
}
201 };
202
class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  // Accessor for the region's block offset table.
  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // The index of this region in the heap region sequence.
  uint _hrm_index;

  // The allocation context this region is associated with.
  AllocationContext_t _allocation_context;

  // The region's current type (free, eden, survivor, humongous, old);
  // see heapRegionType.hpp.
  HeapRegionType _type;

  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets, each
388 size_t reclaimable_bytes() {
389 size_t known_live_bytes = live_bytes();
390 assert(known_live_bytes <= capacity(), "sanity");
391 return capacity() - known_live_bytes;
392 }
393
// An upper bound on the number of live bytes in the region:
// everything used minus what is known to be garbage.
size_t max_live_bytes() { return used() - garbage_bytes(); }
396
397 void add_to_marked_bytes(size_t incr_bytes) {
398 _next_marked_bytes = _next_marked_bytes + incr_bytes;
399 assert(_next_marked_bytes <= used(), "invariant" );
400 }
401
402 void zero_marked_bytes() {
403 _prev_marked_bytes = _next_marked_bytes = 0;
404 }
405
// Printable names (long and short form) of the region's current type.
const char* get_type_str() const { return _type.get_str(); }
const char* get_short_type_str() const { return _type.get_short_str(); }

// Region-type predicates; all delegate to the _type field.
bool is_free() const { return _type.is_free(); }

bool is_young() const { return _type.is_young(); }
bool is_eden() const { return _type.is_eden(); }
bool is_survivor() const { return _type.is_survivor(); }

bool isHumongous() const { return _type.is_humongous(); }
bool startsHumongous() const { return _type.is_starts_humongous(); }
bool continuesHumongous() const { return _type.is_continues_humongous(); }

bool is_old() const { return _type.is_old(); }

// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
  return _humongous_start_region;
}

// Return the number of distinct regions that are covered by this region:
// 1 if the region is not humongous, >= 1 if the region is humongous.
650 assert( surv_rate_group != NULL, "pre-condition" );
651 assert( _surv_rate_group == NULL, "pre-condition" );
652 assert( is_young(), "pre-condition" );
653
654 _surv_rate_group = surv_rate_group;
655 _age_index = surv_rate_group->next_age_index();
656 }
657
658 void uninstall_surv_rate_group() {
659 if (_surv_rate_group != NULL) {
660 assert( _age_index > -1, "pre-condition" );
661 assert( is_young(), "pre-condition" );
662
663 _surv_rate_group = NULL;
664 _age_index = -1;
665 } else {
666 assert( _age_index == -1, "pre-condition" );
667 }
668 }
669
// Region type transitions; each delegates to the _type field.
void set_free() { _type.set_free(); }

void set_eden() { _type.set_eden(); }
void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
void set_survivor() { _type.set_survivor(); }

void set_old() { _type.set_old(); }
677
678 // Determine if an object has been allocated since the last
679 // mark performed by the collector. This returns true iff the object
680 // is within the unmarked area of the region.
681 bool obj_allocated_since_prev_marking(oop obj) const {
682 return (HeapWord *) obj >= prev_top_at_mark_start();
683 }
684 bool obj_allocated_since_next_marking(oop obj) const {
685 return (HeapWord *) obj >= next_top_at_mark_start();
686 }
687
// For parallel heapRegion traversal.
// Attempt to claim this region with the given value; declaration only.
// NOTE(review): presumably an atomic update of _claimed returning true
// iff this caller won the claim -- confirm in heapRegion.cpp.
bool claimHeapRegion(int claimValue);
jint claim_value() { return _claimed; }
// Use this carefully: only when you're sure no one is claiming...
void set_claim_value(int claimValue) { _claimed = claimValue; }

// Returns the "evacuation_failed" property of the region.
bool evacuation_failed() { return _evacuation_failed; }
696
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
27
28 #include "gc_implementation/g1/g1AllocationContext.hpp"
29 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
30 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
31 #include "gc_implementation/g1/heapRegionType.hpp"
32 #include "gc_implementation/g1/survRateGroup.hpp"
33 #include "gc_implementation/shared/ageTable.hpp"
34 #include "gc_implementation/shared/spaceDecorator.hpp"
35 #include "memory/space.inline.hpp"
36 #include "memory/watermark.hpp"
37 #include "utilities/macros.hpp"
38 #include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
39
40 // A HeapRegion is the smallest piece of a G1CollectedHeap that
41 // can be collected independently.
42
43 // NOTE: Although a HeapRegion is a Space, its
44 // Space::initDirtyCardClosure method must not be called.
45 // The problem is that the existence of this method breaks
46 // the independence of barrier sets from remembered sets.
47 // The solution is to remove this method from the definition
48 // of a Space.
49
50 class HeapRegionRemSet;
51 class HeapRegionRemSetIterator;
52 class HeapRegion;
53 class HeapRegionSetBase;
54 class nmethod;
55
56 #define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
57 #define HR_FORMAT_PARAMS(_hr_) \
58 (_hr_)->hrm_index(), \
// Reset this space's block offset table (delegates to _offsets).
void reset_bot() {
  _offsets.reset_bot();
}

// Print the block offset table to the given stream, for debugging.
void print_bot_on(outputStream* out) {
  _offsets.print_on(out);
}
202 };
203
class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  // Accessor for the region's block offset table.
  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

  // Report that this region's type changed to "to"; declaration only.
  // NOTE(review): presumably emits a trace/JFR event for the transition --
  // confirm against the definition in heapRegion.cpp.
  void report_region_type_change(G1HeapRegionTraceType::Type to);

 protected:
  // The index of this region in the heap region sequence.
  uint _hrm_index;

  // The allocation context this region is associated with.
  AllocationContext_t _allocation_context;

  // The region's current type (free, eden, survivor, humongous, old);
  // see heapRegionType.hpp.
  HeapRegionType _type;

  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets, each
391 size_t reclaimable_bytes() {
392 size_t known_live_bytes = live_bytes();
393 assert(known_live_bytes <= capacity(), "sanity");
394 return capacity() - known_live_bytes;
395 }
396
// An upper bound on the number of live bytes in the region:
// everything used minus what is known to be garbage.
size_t max_live_bytes() { return used() - garbage_bytes(); }
399
400 void add_to_marked_bytes(size_t incr_bytes) {
401 _next_marked_bytes = _next_marked_bytes + incr_bytes;
402 assert(_next_marked_bytes <= used(), "invariant" );
403 }
404
405 void zero_marked_bytes() {
406 _prev_marked_bytes = _next_marked_bytes = 0;
407 }
408
// Printable names (long and short form) of the region's current type.
const char* get_type_str() const { return _type.get_str(); }
const char* get_short_type_str() const { return _type.get_short_str(); }
// Trace-framework representation of the region's current type.
G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }

// Region-type predicates; all delegate to the _type field.
bool is_free() const { return _type.is_free(); }

bool is_young() const { return _type.is_young(); }
bool is_eden() const { return _type.is_eden(); }
bool is_survivor() const { return _type.is_survivor(); }

bool isHumongous() const { return _type.is_humongous(); }
bool startsHumongous() const { return _type.is_starts_humongous(); }
bool continuesHumongous() const { return _type.is_continues_humongous(); }

bool is_old() const { return _type.is_old(); }

// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
  return _humongous_start_region;
}

// Return the number of distinct regions that are covered by this region:
// 1 if the region is not humongous, >= 1 if the region is humongous.
654 assert( surv_rate_group != NULL, "pre-condition" );
655 assert( _surv_rate_group == NULL, "pre-condition" );
656 assert( is_young(), "pre-condition" );
657
658 _surv_rate_group = surv_rate_group;
659 _age_index = surv_rate_group->next_age_index();
660 }
661
662 void uninstall_surv_rate_group() {
663 if (_surv_rate_group != NULL) {
664 assert( _age_index > -1, "pre-condition" );
665 assert( is_young(), "pre-condition" );
666
667 _surv_rate_group = NULL;
668 _age_index = -1;
669 } else {
670 assert( _age_index == -1, "pre-condition" );
671 }
672 }
673
// Region type transitions; declarations only in this version.
// NOTE(review): presumably defined out of line so each transition can be
// reported via report_region_type_change() -- confirm in heapRegion.cpp.
void set_free();

void set_eden();
void set_eden_pre_gc();
void set_survivor();

void set_old();
681
682 // Determine if an object has been allocated since the last
683 // mark performed by the collector. This returns true iff the object
684 // is within the unmarked area of the region.
685 bool obj_allocated_since_prev_marking(oop obj) const {
686 return (HeapWord *) obj >= prev_top_at_mark_start();
687 }
688 bool obj_allocated_since_next_marking(oop obj) const {
689 return (HeapWord *) obj >= next_top_at_mark_start();
690 }
691
// For parallel heapRegion traversal.
// Attempt to claim this region with the given value; declaration only.
// NOTE(review): presumably an atomic update of _claimed returning true
// iff this caller won the claim -- confirm in heapRegion.cpp.
bool claimHeapRegion(int claimValue);
jint claim_value() { return _claimed; }
// Use this carefully: only when you're sure no one is claiming...
void set_claim_value(int claimValue) { _claimed = claimValue; }

// Returns the "evacuation_failed" property of the region.
bool evacuation_failed() { return _evacuation_failed; }
700
|