101 }
102
103 inline bool retain_entry(const void* entry, G1CollectedHeap* heap) {
104 return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry);
105 }
106
107 // This method removes entries from a SATB buffer that will not be
108 // useful to the concurrent marking threads. Entries are retained if
109 // they require marking and are not already marked. Retained entries
110 // are compacted toward the top of the buffer.
111
// Remove entries the concurrent mark threads no longer need.  On
// return, _index (a byte offset) has been advanced so that only
// retained entries remain in [_index, _sz).
void SATBMarkQueue::filter() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  void** buf = _buf;

  if (buf == NULL) {
    // nothing to do
    return;
  }

  assert(_index <= _sz, "invariant");

  // Two-fingered compaction toward the end: src scans up from the first
  // active entry, dst scans down from the end of the buffer.
  // byte_index_to_index converts the byte offsets to element indices.
  void** src = &buf[byte_index_to_index(_index)];
  void** dst = &buf[byte_index_to_index(_sz)];
  for ( ; src < dst; ++src) {
    // Search low to high for an entry to keep.
    void* entry = *src;
    if (retain_entry(entry, g1h)) {
      // Found keeper. Search high to low for an entry to discard.
      while (src < --dst) {
        if (!retain_entry(*dst, g1h)) {
          *dst = entry; // Replace discard with keeper.
          break;
        }
      }
      // If discard search failed (src == dst), the outer loop will also end.
    }
  }
  // dst points to the lowest retained entry, or the end of the buffer
  // if all the entries were filtered out.
  _index = pointer_delta(dst, buf, 1); // scale 1: _index is a byte offset
}
144
145 // This method will first apply the above filtering to the buffer. If
146 // post-filtering a large enough chunk of the buffer has been cleared
147 // we can re-use the buffer (instead of enqueueing it) and we can just
148 // allow the mutator to carry on executing using the same buffer
149 // instead of replacing it.
150
151 bool SATBMarkQueue::should_enqueue_buffer() {
152 assert(_lock == NULL || _lock->owned_by_self(),
153 "we should have taken the lock before calling this");
154
155 // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
156
157 // This method should only be called if there is a non-NULL buffer
158 // that is full.
159 assert(_index == 0, "pre-condition");
160 assert(_buf != NULL, "pre-condition");
161
162 filter();
163
164 size_t percent_used = ((_sz - _index) * 100) / _sz;
165 bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
166 return should_enqueue;
167 }
168
169 void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
170 assert(SafepointSynchronize::is_at_safepoint(),
171 "SATB queues must only be processed at safepoints");
172 if (_buf != NULL) {
173 assert(_index % sizeof(void*) == 0, "invariant");
174 assert(_sz % sizeof(void*) == 0, "invariant");
175 assert(_index <= _sz, "invariant");
176 cl->do_buffer(_buf + byte_index_to_index(_index),
177 byte_index_to_index(_sz - _index));
178 _index = _sz;
179 }
180 }
181
182 #ifndef PRODUCT
183 // Helpful for debugging
184
// Debug helper: print this queue's buffer pointer and byte indices
// under the given tag.
void SATBMarkQueue::print(const char* name) {
  print(name, _buf, _index, _sz);
}
188
// Debug helper: print an arbitrary buffer's address, index and size
// under the given tag.
void SATBMarkQueue::print(const char* name,
                          void** buf, size_t index, size_t sz) {
  tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT " sz: " SIZE_FORMAT,
                name, p2i(buf), index, sz);
}
194 #endif // PRODUCT
195
// The shared queue belongs to the set itself (not to a JavaThread),
// hence the "permanent" flag.
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _shared_satb_queue(this, true /* permanent */) { }
199
// Forward the monitor/lock configuration to the PtrQueueSet base class
// and attach the given lock to the shared queue.
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  // -1: presumably "no limit" for the base class's final (max buffers)
  // argument -- TODO confirm against PtrQueueSet::initialize.
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
}
206
// Called when thread t's SATB queue index reaches zero (i.e. its buffer
// is full); delegates to the queue's own zero-index handling.
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->satb_mark_queue().handle_zero_index();
}
210
211 #ifdef ASSERT
212 void SATBMarkQueueSet::dump_active_states(bool expected_active) {
213 log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
258 for(JavaThread* t = Threads::first(); t; t = t->next()) {
259 t->satb_mark_queue().filter();
260 }
261 shared_satb_queue()->filter();
262 }
263
// Remove one completed buffer from the list and apply cl to its active
// entries.  Returns true if a buffer was processed, false if the list
// was empty.
bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
  BufferNode* nd = NULL;
  {
    // Pop the head of the completed-buffer list under the lock; the
    // closure runs outside the critical section.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    if (_completed_buffers_head != NULL) {
      nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
      _n_completed_buffers--;
      if (_n_completed_buffers == 0) _process_completed = false;
    }
  }
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    // nd->index() and _sz are byte offsets; convert to element indices.
    size_t index = SATBMarkQueue::byte_index_to_index(nd->index());
    size_t size = SATBMarkQueue::byte_index_to_index(_sz);
    assert(index <= size, "invariant");
    cl->do_buffer(buf + index, size - index);
    deallocate_buffer(nd);
    return true;
  } else {
    return false;
  }
}
288
289 #ifndef PRODUCT
290 // Helpful for debugging
291
292 #define SATB_PRINTER_BUFFER_SIZE 256
293
294 void SATBMarkQueueSet::print_all(const char* msg) {
295 char buffer[SATB_PRINTER_BUFFER_SIZE];
296 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
297
298 tty->cr();
299 tty->print_cr("SATB BUFFERS [%s]", msg);
300
301 BufferNode* nd = _completed_buffers_head;
302 int i = 0;
303 while (nd != NULL) {
304 void** buf = BufferNode::make_buffer_from_node(nd);
305 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
306 SATBMarkQueue::print(buffer, buf, 0, _sz);
307 nd = nd->next();
308 i += 1;
309 }
310
311 for (JavaThread* t = Threads::first(); t; t = t->next()) {
312 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
313 t->satb_mark_queue().print(buffer);
314 }
315
316 shared_satb_queue()->print("Shared");
317
318 tty->cr();
319 }
320 #endif // PRODUCT
321
322 void SATBMarkQueueSet::abandon_partial_marking() {
323 BufferNode* buffers_to_delete = NULL;
324 {
325 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
326 while (_completed_buffers_head != NULL) {
|
101 }
102
103 inline bool retain_entry(const void* entry, G1CollectedHeap* heap) {
104 return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry);
105 }
106
107 // This method removes entries from a SATB buffer that will not be
108 // useful to the concurrent marking threads. Entries are retained if
109 // they require marking and are not already marked. Retained entries
110 // are compacted toward the top of the buffer.
111
// Remove entries the concurrent mark threads no longer need.  On
// return, index() has been advanced so that only retained entries
// remain in [index(), capacity()).
void SATBMarkQueue::filter() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  void** buf = _buf;

  if (buf == NULL) {
    // nothing to do
    return;
  }

  // Two-fingered compaction toward the end: src scans up from the
  // first active entry, dst scans down from the end of the buffer.
  void** src = &buf[index()];
  void** dst = &buf[capacity()];
  assert(src <= dst, "invariant");
  for ( ; src < dst; ++src) {
    // Search low to high for an entry to keep.
    void* entry = *src;
    if (retain_entry(entry, g1h)) {
      // Found keeper. Search high to low for an entry to discard.
      while (src < --dst) {
        if (!retain_entry(*dst, g1h)) {
          *dst = entry; // Replace discard with keeper.
          break;
        }
      }
      // If discard search failed (src == dst), the outer loop will also end.
    }
  }
  // dst points to the lowest retained entry, or the end of the buffer
  // if all the entries were filtered out.
  set_index(dst - buf);
}
143
144 // This method will first apply the above filtering to the buffer. If
145 // post-filtering a large enough chunk of the buffer has been cleared
146 // we can re-use the buffer (instead of enqueueing it) and we can just
147 // allow the mutator to carry on executing using the same buffer
148 // instead of replacing it.
149
150 bool SATBMarkQueue::should_enqueue_buffer() {
151 assert(_lock == NULL || _lock->owned_by_self(),
152 "we should have taken the lock before calling this");
153
154 // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
155
156 // This method should only be called if there is a non-NULL buffer
157 // that is full.
158 assert(index() == 0, "pre-condition");
159 assert(_buf != NULL, "pre-condition");
160
161 filter();
162
163 size_t cap = capacity();
164 size_t percent_used = ((cap - index()) * 100) / cap;
165 bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
166 return should_enqueue;
167 }
168
169 void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
170 assert(SafepointSynchronize::is_at_safepoint(),
171 "SATB queues must only be processed at safepoints");
172 if (_buf != NULL) {
173 cl->do_buffer(&_buf[index()], size());
174 reset();
175 }
176 }
177
178 #ifndef PRODUCT
179 // Helpful for debugging
180
// File-local debug helper: print a buffer's address, current index and
// capacity under the given tag.
static void print_satb_buffer(const char* name,
                              void** buf,
                              size_t index,
                              size_t capacity) {
  tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
                " capacity: " SIZE_FORMAT,
                name, p2i(buf), index, capacity);
}
189
// Debug helper: print this queue's buffer, index and capacity under
// the given tag.
void SATBMarkQueue::print(const char* name) {
  print_satb_buffer(name, _buf, index(), capacity());
}
193
194 #endif // PRODUCT
195
// The shared queue belongs to the set itself (not to a JavaThread),
// hence the "permanent" flag.
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _shared_satb_queue(this, true /* permanent */) { }
199
// Forward the monitor/lock configuration to the PtrQueueSet base class
// and attach the given lock to the shared queue.
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  // -1: presumably "no limit" for the base class's final (max buffers)
  // argument -- TODO confirm against PtrQueueSet::initialize.
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
}
206
// Called when thread t's SATB queue index reaches zero (i.e. its buffer
// is full); delegates to the queue's own zero-index handling.
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->satb_mark_queue().handle_zero_index();
}
210
211 #ifdef ASSERT
212 void SATBMarkQueueSet::dump_active_states(bool expected_active) {
213 log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
258 for(JavaThread* t = Threads::first(); t; t = t->next()) {
259 t->satb_mark_queue().filter();
260 }
261 shared_satb_queue()->filter();
262 }
263
// Remove one completed buffer from the list and apply cl to its active
// entries.  Returns true if a buffer was processed, false if the list
// was empty.
bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
  BufferNode* nd = NULL;
  {
    // Pop the head of the completed-buffer list under the lock; the
    // closure runs outside the critical section.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    if (_completed_buffers_head != NULL) {
      nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
      _n_completed_buffers--;
      if (_n_completed_buffers == 0) _process_completed = false;
    }
  }
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    // Active entries occupy [nd->index(), buffer_size()).
    size_t index = nd->index();
    size_t size = buffer_size();
    assert(index <= size, "invariant");
    cl->do_buffer(buf + index, size - index);
    deallocate_buffer(nd);
    return true;
  } else {
    return false;
  }
}
288
289 #ifndef PRODUCT
290 // Helpful for debugging
291
292 #define SATB_PRINTER_BUFFER_SIZE 256
293
294 void SATBMarkQueueSet::print_all(const char* msg) {
295 char buffer[SATB_PRINTER_BUFFER_SIZE];
296 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
297
298 tty->cr();
299 tty->print_cr("SATB BUFFERS [%s]", msg);
300
301 BufferNode* nd = _completed_buffers_head;
302 int i = 0;
303 while (nd != NULL) {
304 void** buf = BufferNode::make_buffer_from_node(nd);
305 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
306 print_satb_buffer(buffer, buf, nd->index(), buffer_size());
307 nd = nd->next();
308 i += 1;
309 }
310
311 for (JavaThread* t = Threads::first(); t; t = t->next()) {
312 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
313 t->satb_mark_queue().print(buffer);
314 }
315
316 shared_satb_queue()->print("Shared");
317
318 tty->cr();
319 }
320 #endif // PRODUCT
321
322 void SATBMarkQueueSet::abandon_partial_marking() {
323 BufferNode* buffers_to_delete = NULL;
324 {
325 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
326 while (_completed_buffers_head != NULL) {
|