22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CollectedHeap.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
34 #include "gc/shared/ageTable.hpp"
35 #include "memory/allocation.hpp"
36 #include "oops/oop.hpp"
37
38 class G1PLABAllocator;
39 class HeapRegion;
40 class outputStream;
41
22 * // Per-worker-thread scratch state used during a G1 evacuation pause.
22 * // StackObj base: instances live on the worker's stack for the pause only.
42 class G1ParScanThreadState : public StackObj {
43 private:
44 G1CollectedHeap* _g1h;
45 RefToScanQueue* _refs; // Worker-local queue of references still to be scanned (see push_on_queue/queue_is_empty).
46 DirtyCardQueue _dcq; // Buffers cards dirtied during evacuation; filled by update_rs().
47 G1SATBCardTableModRefBS* _ct_bs; // Card table / barrier set; accessed via ctbs().
48 G1RemSet* _g1_rem;
49
50 G1PLABAllocator* _plab_allocator;
51
52 ageTable _age_table; // Histogram of surviving object ages; presumably feeds _tenuring_threshold — verify in .cpp.
53 InCSetState _dest[InCSetState::Num]; // Maps a source in-collection-set state to its evacuation destination state.
54 // Local tenuring threshold.
55 uint _tenuring_threshold;
56 G1ParScanClosure _scanner;
57
58 int _hash_seed; // NOTE(review): presumably seeds work-stealing victim selection — not used in this header.
59 uint _worker_id;
60
61 size_t _term_attempts; // Times this worker entered the termination protocol (see note_term_attempt()).
62
63 double _start; // Timestamps/accumulators in seconds (os::elapsedTime()) for per-thread phase timing.
64 double _start_strong_roots;
65 double _strong_roots_time;
66 double _start_term;
67 double _term_time;
68
69 // Map from young-age-index (0 == not young, 1 is youngest) to
70 // surviving words. base is what we get back from the malloc call
71 size_t* _surviving_young_words_base;
72 // this points into the array, as we use the first few entries for padding
73 size_t* _surviving_young_words;
74
75 // Indicates whether in the last generation (old) there is no more space
76 // available for allocation.
77 bool _old_gen_is_full;
78
79 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
80
81 DirtyCardQueue& dirty_card_queue() { return _dcq; }
82 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
83
84 InCSetState dest(InCSetState original) const { // Destination state for `original`; both states are assert-checked.
85 assert(original.is_valid(),
86 err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
87 assert(_dest[original.value()].is_valid_gen(),
88 err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
89 return _dest[original.value()];
90 }
91
92 public:
93 G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp); // This revision takes the ReferenceProcessor at construction.
94 ~G1ParScanThreadState();
95
96 ageTable* age_table() { return &_age_table; }
97
98 #ifdef ASSERT
99 bool queue_is_empty() const { return _refs->is_empty(); }
100
101 bool verify_ref(narrowOop* ref) const; // Debug-only sanity checks on queued references/tasks; defined out of line.
102 bool verify_ref(oop* ref) const;
103 bool verify_task(StarTask ref) const;
104 #endif // ASSERT
105
106 template <class T> void push_on_queue(T* ref); // Enqueue a reference on _refs for later scanning; defined out of line.
107
// Record the card spanning field p in the dirty card queue so the remembered
// sets are refined later. Skipped when the referenced object is in the same
// region as `from` or `from` is a survivor region (to-space), where no
// remembered-set entry is needed.
108 template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
109 // If the new value of the field points to the same region or
110 // is the to-space, we don't need to include it in the Rset updates.
111 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
112 size_t card_index = ctbs()->index_for(p);
113 // If the card hasn't been added to the buffer, do it.
114 if (ctbs()->mark_card_deferred(card_index)) { // mark_card_deferred() returning true means we claimed the card.
115 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
116 }
117 }
118 } // NOTE(review): parameter `tid` is unused in this body.
119
120 uint worker_id() { return _worker_id; }
121
122 size_t term_attempts() const { return _term_attempts; }
123 void note_term_attempt() { _term_attempts++; }
124
// Phase timing: bracket strong-roots work and termination waits. Times are
// in seconds and accumulate across multiple start/end pairs.
125 void start_strong_roots() {
126 _start_strong_roots = os::elapsedTime();
127 }
128 void end_strong_roots() {
129 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
130 }
131 double strong_roots_time() const { return _strong_roots_time; }
132
133 void start_term_time() {
134 note_term_attempt(); // Each timed termination wait also counts as an attempt.
135 _start_term = os::elapsedTime();
136 }
137 void end_term_time() {
138 _term_time += (os::elapsedTime() - _start_term);
139 }
140 double term_time() const { return _term_time; }
141
142 double elapsed_time() const { // Total wall time since _start — presumably set in the constructor; verify in .cpp.
143 return os::elapsedTime() - _start;
144 }
145
146 // Print the header for the per-thread termination statistics.
147 static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
148
149 // Print actual per-thread termination statistics.
150 void print_termination_stats(outputStream* const st = gclog_or_tty) const;
151
152 size_t* surviving_young_words() {
153 // We add on to hide entry 0 which accumulates surviving words for
154 // age -1 regions (i.e. non-young ones)
155 return _surviving_young_words;
156 }
// NOTE(review): the comment above says entry 0 is hidden, but no offset is
// applied here; a later revision of this accessor returns
// _surviving_young_words + 1 with the comment "We add one". Confirm against
// this revision's callers whether they index from 0 or expect the +1 offset.
157
158 private:
// Tagging scheme: a partial-array task is an oop* with bit 1 set, so it can
// share the same work queue as ordinary reference locations.
159 #define G1_PARTIAL_ARRAY_MASK 0x2
160
161 inline bool has_partial_array_mask(oop* ref) const {
162 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
163 }
164
165 // We never encode partial array oops as narrowOop*, so return false immediately.
166 // This allows the compiler to create optimized code when popping references from
167 // the work queue.
168 inline bool has_partial_array_mask(narrowOop* ref) const {
169 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
170 return false;
171 }
172
173 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
174 // We always encode partial arrays as regular oop, to allow the
175 // specialization for has_partial_array_mask() for narrowOops above.
|
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CollectedHeap.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
34 #include "gc/shared/ageTable.hpp"
35 #include "memory/allocation.hpp"
36 #include "oops/oop.hpp"
37
38 class G1PLABAllocator;
39 class HeapRegion;
40 class outputStream;
41
22 * // Per-worker-thread scratch state used during a G1 evacuation pause.
22 * // CHeapObj<mtGC>: in this revision instances are allocated on the C heap
22 * // (GC memory tag) rather than on the stack.
42 class G1ParScanThreadState : public CHeapObj<mtGC> {
43 private:
44 G1CollectedHeap* _g1h;
45 RefToScanQueue* _refs; // Worker-local queue of references still to be scanned (see push_on_queue/queue_is_empty).
46 DirtyCardQueue _dcq; // Buffers cards dirtied during evacuation; filled by update_rs().
47 G1SATBCardTableModRefBS* _ct_bs; // Card table / barrier set; accessed via ctbs().
48 G1RemSet* _g1_rem;
49
50 G1PLABAllocator* _plab_allocator;
51
52 ageTable _age_table; // Histogram of surviving object ages; presumably feeds _tenuring_threshold — verify in .cpp.
53 InCSetState _dest[InCSetState::Num]; // Maps a source in-collection-set state to its evacuation destination state.
54 // Local tenuring threshold.
55 uint _tenuring_threshold;
56 G1ParScanClosure _scanner; // Receives its ReferenceProcessor via set_ref_processor() below.
57
58 int _hash_seed; // NOTE(review): presumably seeds work-stealing victim selection — not used in this header.
59 uint _worker_id;
60
61 // Map from young-age-index (0 == not young, 1 is youngest) to
62 // surviving words. base is what we get back from the malloc call
63 size_t* _surviving_young_words_base;
64 // this points into the array, as we use the first few entries for padding
65 size_t* _surviving_young_words;
66
67 // Indicates whether in the last generation (old) there is no more space
68 // available for allocation.
69 bool _old_gen_is_full;
70
71 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
72
73 DirtyCardQueue& dirty_card_queue() { return _dcq; }
74 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
75
76 InCSetState dest(InCSetState original) const { // Destination state for `original`; both states are assert-checked.
77 assert(original.is_valid(),
78 err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
79 assert(_dest[original.value()].is_valid_gen(),
80 err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
81 return _dest[original.value()];
82 }
83
84 public:
85 G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id); // Unlike the earlier revision, the ReferenceProcessor is set after construction.
86 ~G1ParScanThreadState();
87
88 void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); } // Forwards to the embedded scan closure.
89
90 ageTable* age_table() { return &_age_table; }
91
92 #ifdef ASSERT
93 bool queue_is_empty() const { return _refs->is_empty(); }
94
95 bool verify_ref(narrowOop* ref) const; // Debug-only sanity checks on queued references/tasks; defined out of line.
96 bool verify_ref(oop* ref) const;
97 bool verify_task(StarTask ref) const;
98 #endif // ASSERT
99
100 template <class T> void push_on_queue(T* ref); // Enqueue a reference on _refs for later scanning; defined out of line.
101
// Record the card spanning field p in the dirty card queue so the remembered
// sets are refined later. Skipped when the referenced object is in the same
// region as `from` or `from` is a survivor region (to-space), where no
// remembered-set entry is needed.
102 template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
103 // If the new value of the field points to the same region or
104 // is the to-space, we don't need to include it in the Rset updates.
105 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
106 size_t card_index = ctbs()->index_for(p);
107 // If the card hasn't been added to the buffer, do it.
108 if (ctbs()->mark_card_deferred(card_index)) { // mark_card_deferred() returning true means we claimed the card.
109 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
110 }
111 }
112 } // NOTE(review): parameter `tid` is unused in this body.
113
114 uint worker_id() { return _worker_id; }
115
116 // Returns the current amount of waste due to alignment or not being able to fit
117 // objects within LABs and the undo waste.
118 virtual void waste(size_t& wasted, size_t& undo_wasted);
119
120 size_t* surviving_young_words() {
121 // We add one to hide entry 0 which accumulates surviving words for
122 // age -1 regions (i.e. non-young ones)
123 return _surviving_young_words + 1;
124 } // Callers therefore index from 0 == youngest age; entry 0 (non-young) stays internal.
125
126 private:
// Tagging scheme: a partial-array task is an oop* with bit 1 set, so it can
// share the same work queue as ordinary reference locations.
127 #define G1_PARTIAL_ARRAY_MASK 0x2
128
129 inline bool has_partial_array_mask(oop* ref) const {
130 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
131 }
132
133 // We never encode partial array oops as narrowOop*, so return false immediately.
134 // This allows the compiler to create optimized code when popping references from
135 // the work queue.
136 inline bool has_partial_array_mask(narrowOop* ref) const {
137 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
138 return false;
139 }
140
141 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
142 // We always encode partial arrays as regular oop, to allow the
143 // specialization for has_partial_array_mask() for narrowOops above.
|