49
50 // Process queue entries and release resources.
51 void flush() { flush_impl(); }
52
53 inline G1DirtyCardQueueSet* dirty_card_qset() const; // Downcast accessor for the owning queue set (defined below).
54
55 // Compiler support: field offsets/widths of the queue's index and buffer, presumably for JIT-generated code — confirm with callers.
56 static ByteSize byte_offset_of_index() {
57 return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
58 }
59 using PtrQueue::byte_width_of_index;
60
61 static ByteSize byte_offset_of_buf() {
62 return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
63 }
64 using PtrQueue::byte_width_of_buf;
65
66 };
67
68 class G1DirtyCardQueueSet: public PtrQueueSet {
69 Monitor* _cbl_mon; // Protects the fields below.
70 BufferNode* _completed_buffers_head;
71 BufferNode* _completed_buffers_tail;
72
73 // Number of actual entries in the list of completed buffers.
74 volatile size_t _num_entries_in_completed_buffers;
75
76 size_t _process_completed_buffers_threshold;
77 volatile bool _process_completed_buffers;
78
79 // If true, notify_all on _cbl_mon when the threshold is reached.
80 bool _notify_when_complete;
81
82 void abandon_completed_buffers();
83
84 // Apply the closure to the elements of "node" from its index to
85 // buffer_size. If all closure applications return true, then
86 // returns true. Stops processing after the first closure
87 // application that returns false, and returns false from this
88 // function. The node's index is updated to exclude the processed
89 // elements, e.g. up to the element for which the closure returned
90 // false, or one past the last element if the closure always
91 // returned true.
92 bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
93 BufferNode* node,
94 uint worker_i = 0);
95
96 // If there are more than stop_at completed buffers, pop one, apply
97 // the specified closure to its active elements, and return true.
98 // Otherwise return false.
99 //
100 // A completely processed buffer is freed. However, if a closure
101 // invocation returns false, processing is stopped and the partially
102 // processed buffer (with its index updated to exclude the processed
103 // elements, e.g. up to the element for which the closure returned
104 // false) is returned to the completed buffer set.
105 //
106 // If during_pause is true, stop_at must be zero, and the closure
107 // must never return false.
108 bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
109 uint worker_i,
110 size_t stop_at,
111 bool during_pause);
112
113 bool mut_process_buffer(BufferNode* node);
114
115 // If the queue contains more buffers than configured here, the
116 // mutator must start doing some of the concurrent refinement work.
117 size_t _max_completed_buffers;
118 size_t _completed_buffers_padding;
119 static const size_t MaxCompletedBuffersUnlimited = SIZE_MAX;
120
121 G1FreeIdSet* _free_ids; // Claimable parallel ids for card processing; see num_par_ids() and initialize(init_free_ids).
122
123 // The number of completed buffers processed by mutator and rs thread,
124 // respectively.
125 jint _processed_buffers_mut;
126 jint _processed_buffers_rs_thread;
127
128 public:
129 G1DirtyCardQueueSet(bool notify_when_complete = true);
130 ~G1DirtyCardQueueSet();
131
132 void initialize(Monitor* cbl_mon,
133 BufferNode::Allocator* allocator,
134 bool init_free_ids = false);
135
136 // The number of parallel ids that can be claimed to allow collector or
137 // mutator threads to do card-processing work.
138 static uint num_par_ids();
139
140 static void handle_zero_index_for_thread(Thread* t);
141
142 // Either process the entire buffer and return true, or enqueue the
143 // buffer and return false. If the buffer is completely processed,
144 // it can be reused in place.
145 bool process_or_enqueue_completed_buffer(BufferNode* node);
146
147 virtual void enqueue_completed_buffer(BufferNode* node);
148
149 // If the number of completed buffers is > stop_at, then remove and
150 // return a completed buffer from the list. Otherwise, return NULL.
151 BufferNode* get_completed_buffer(size_t stop_at = 0);
152
153 // The number of buffers in the list. Derived as an approximation from the number
154 // of entries in the buffers (rounded up to whole buffers). Racy.
155 size_t num_completed_buffers() const {
156 return (num_entries_in_completed_buffers() + buffer_size() - 1) / buffer_size();
157 }
158 // The number of entries in completed buffers. Read without synchronization.
159 size_t num_entries_in_completed_buffers() const { return _num_entries_in_completed_buffers; }
160
161 // Verify that _num_entries_in_completed_buffers is equal to the sum of actual entries
162 // in the completed buffers. Debug-only; no-op in product builds.
163 void verify_num_entries_in_completed_buffers() const NOT_DEBUG_RETURN;
164
165 bool process_completed_buffers() { return _process_completed_buffers; }
166 void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
167
168 // Get/Set the number of completed buffers that triggers log processing.
169 // Log processing should be done when the number of buffers exceeds the
170 // threshold.
171 void set_process_completed_buffers_threshold(size_t sz) {
172 _process_completed_buffers_threshold = sz;
173 }
174 size_t process_completed_buffers_threshold() const {
175 return _process_completed_buffers_threshold;
176 }
177 static const size_t ProcessCompletedBuffersThresholdNever = SIZE_MAX;
178
179 // Notify the consumer if the number of buffers crossed the threshold.
180 void notify_if_necessary();
181
182 void merge_bufferlists(G1RedirtyCardsQueueSet* src);
183
184 // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
185 // completed buffers remaining.
186 bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
187
188 // Apply the given closure to all completed buffers. The given closure's do_card_ptr
189 // must never return false. Must only be called during GC.
190 bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
191
192 // If a full collection is happening, reset partial logs, and release
193 // completed ones: the full collection will make them all irrelevant.
194 void abandon_logs();
195
196 // If any threads have partial logs, add them to the global list of logs.
197 void concatenate_logs();
198
199 void set_max_completed_buffers(size_t m) {
200 _max_completed_buffers = m;
201 }
202 size_t max_completed_buffers() const {
203 return _max_completed_buffers;
204 }
205
206 void set_completed_buffers_padding(size_t padding) {
207 _completed_buffers_padding = padding;
208 }
209 size_t completed_buffers_padding() const {
210 return _completed_buffers_padding;
211 }
212
213 jint processed_buffers_mut() {
214 return _processed_buffers_mut;
215 }
216 jint processed_buffers_rs_thread() {
217 return _processed_buffers_rs_thread;
218 }
219
220 };
221
222 inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const {
223 return static_cast<G1DirtyCardQueueSet*>(qset()); // Unchecked downcast — presumably this queue's qset is always a G1DirtyCardQueueSet; confirm at construction sites.
224 }
225
226 #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
|
49
50 // Process queue entries and release resources.
51 void flush() { flush_impl(); }
52
53 inline G1DirtyCardQueueSet* dirty_card_qset() const; // Downcast accessor for the owning queue set (defined below).
54
55 // Compiler support: field offsets/widths of the queue's index and buffer, presumably for JIT-generated code — confirm with callers.
56 static ByteSize byte_offset_of_index() {
57 return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
58 }
59 using PtrQueue::byte_width_of_index;
60
61 static ByteSize byte_offset_of_buf() {
62 return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
63 }
64 using PtrQueue::byte_width_of_buf;
65
66 };
67
68 class G1DirtyCardQueueSet: public PtrQueueSet {
69 Monitor* _cbl_mon; // Protects the list and count members.
70 BufferNode* _completed_buffers_head;
71 BufferNode* _completed_buffers_tail;
72
73 // Number of actual cards in the list of completed buffers.
74 volatile size_t _num_cards;
75
76 size_t _process_cards_threshold;
77 volatile bool _process_completed_buffers;
78
79 // If true, notify_all on _cbl_mon when the threshold is reached.
80 bool _notify_when_complete;
81
82 void abandon_completed_buffers();
83
84 // Apply the closure to the elements of "node" from its index to
85 // buffer_size. If all closure applications return true, then
86 // returns true. Stops processing after the first closure
87 // application that returns false, and returns false from this
88 // function. The node's index is updated to exclude the processed
89 // elements, e.g. up to the element for which the closure returned
90 // false, or one past the last element if the closure always
91 // returned true.
92 bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
93 BufferNode* node,
94 uint worker_i = 0);
95
96 // If there are more than stop_at completed buffers, pop one, apply
97 // the specified closure to its active elements, and return true.
98 // Otherwise return false.
99 //
100 // A completely processed buffer is freed. However, if a closure
101 // invocation returns false, processing is stopped and the partially
102 // processed buffer (with its index updated to exclude the processed
103 // elements, e.g. up to the element for which the closure returned
104 // false) is returned to the completed buffer set.
105 //
106 // If during_pause is true, stop_at must be zero, and the closure
107 // must never return false.
108 bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
109 uint worker_i,
110 size_t stop_at,
111 bool during_pause);
112
113 bool mut_process_buffer(BufferNode* node);
114
115 // If the queue contains more cards than configured here, the
116 // mutator must start doing some of the concurrent refinement work.
117 size_t _max_cards;
118 size_t _max_cards_padding;
119 static const size_t MaxCardsUnlimited = SIZE_MAX;
120
121 G1FreeIdSet* _free_ids; // Claimable parallel ids for card processing; see num_par_ids() and initialize(init_free_ids).
122
123 // The number of completed buffers processed by mutator and rs thread,
124 // respectively.
125 jint _processed_buffers_mut;
126 jint _processed_buffers_rs_thread;
127
128 public:
129 G1DirtyCardQueueSet(bool notify_when_complete = true);
130 ~G1DirtyCardQueueSet();
131
132 void initialize(Monitor* cbl_mon,
133 BufferNode::Allocator* allocator,
134 bool init_free_ids = false);
135
136 // The number of parallel ids that can be claimed to allow collector or
137 // mutator threads to do card-processing work.
138 static uint num_par_ids();
139
140 static void handle_zero_index_for_thread(Thread* t);
141
142 // Either process the entire buffer and return true, or enqueue the
143 // buffer and return false. If the buffer is completely processed,
144 // it can be reused in place.
145 bool process_or_enqueue_completed_buffer(BufferNode* node);
146
147 virtual void enqueue_completed_buffer(BufferNode* node);
148
149 // If the number of completed buffers is > stop_at, then remove and
150 // return a completed buffer from the list. Otherwise, return NULL.
151 BufferNode* get_completed_buffer(size_t stop_at = 0);
152
153 // The number of cards in completed buffers. Read without synchronization.
154 size_t num_cards() const { return _num_cards; }
155
156 // Verify that _num_cards is equal to the sum of actual cards
157 // in the completed buffers. Debug-only; no-op in product builds.
158 void verify_num_cards() const NOT_DEBUG_RETURN;
159
160 bool process_completed_buffers() { return _process_completed_buffers; }
161 void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
162
163 // Get/Set the number of cards that triggers log processing.
164 // Log processing should be done when the number of cards exceeds the
165 // threshold.
166 void set_process_cards_threshold(size_t sz) {
167 _process_cards_threshold = sz;
168 }
169 size_t process_cards_threshold() const {
170 return _process_cards_threshold;
171 }
172 static const size_t ProcessCardsThresholdNever = SIZE_MAX;
173
174 // Notify the consumer if the number of buffers crossed the threshold.
175 void notify_if_necessary();
176
177 void merge_bufferlists(G1RedirtyCardsQueueSet* src);
178
179 // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
180 // completed buffers remaining.
181 bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
182
183 // Apply the given closure to all completed buffers. The given closure's do_card_ptr
184 // must never return false. Must only be called during GC.
185 bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
186
187 // If a full collection is happening, reset partial logs, and release
188 // completed ones: the full collection will make them all irrelevant.
189 void abandon_logs();
190
191 // If any threads have partial logs, add them to the global list of logs.
192 void concatenate_logs();
193
194 void set_max_cards(size_t m) {
195 _max_cards = m;
196 }
197 size_t max_cards() const {
198 return _max_cards;
199 }
200
201 void set_max_cards_padding(size_t padding) {
202 _max_cards_padding = padding;
203 }
204 size_t max_cards_padding() const {
205 return _max_cards_padding;
206 }
207
208 jint processed_buffers_mut() {
209 return _processed_buffers_mut;
210 }
211 jint processed_buffers_rs_thread() {
212 return _processed_buffers_rs_thread;
213 }
214
215 };
216
217 inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const {
218 return static_cast<G1DirtyCardQueueSet*>(qset()); // Unchecked downcast — presumably this queue's qset is always a G1DirtyCardQueueSet; confirm at construction sites.
219 }
220
221 #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
|