83 // processing must be somewhat circumspect and not assume entries
84 // in an unfiltered buffer refer to valid objects.
85
86 inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
87 // Includes rejection of NULL pointers.
88 assert(heap->is_in_reserved(entry),
89 "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
90
91 HeapRegion* region = heap->heap_region_containing(entry);
92 assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
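// Objects at or above nTAMS (next_top_at_mark_start) were allocated after
// concurrent marking started and are treated as implicitly live, so such
// entries never need to be marked via the SATB buffer.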
93 if (entry >= region->next_top_at_mark_start()) {
94 return false;
95 }
96
97 assert(((oop)entry)->is_oop(true /* ignore mark word */),
98 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry));
99
100 return true;
101 }
102
103 // This method removes entries from a SATB buffer that will not be
104 // useful to the concurrent marking threads. Entries are retained if
105 // they require marking and are not already marked. Retained entries
106 // are compacted toward the top of the buffer.
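// Note: the buffer is filled from the high end toward the low end, so _index
// is the byte offset of the first used slot and live entries occupy
// [_index, _sz); "the top of the buffer" here means the high end.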
107
108 void SATBMarkQueue::filter() {
109 G1CollectedHeap* g1h = G1CollectedHeap::heap();
110 void** buf = _buf;
111
112 if (buf == NULL) {
113 // nothing to do
114 return;
115 }
116
117 // Used for sanity checking at the end of the loop.
118 DEBUG_ONLY(size_t entries = 0; size_t retained = 0;)
119
120 assert(_index <= _sz, "invariant");
121 void** limit = &buf[byte_index_to_index(_index)];
122 void** src = &buf[byte_index_to_index(_sz)];
123 void** dst = src;
124
125 while (limit < src) {
126 DEBUG_ONLY(entries += 1;)
127 --src;
128 void* entry = *src;
129 // NULL the entry so that unused parts of the buffer contain NULLs
130 // at the end. If we are going to retain it we will copy it to its
131 // final place. If we have retained all entries we have visited so
132 // far, we'll just end up copying it to the same place.
133 *src = NULL;
134
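// isMarkedNext() consults the "next" marking bitmap, so entries the
// concurrent marker has already marked are dropped as well.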
135 if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
136 --dst;
137 assert(*dst == NULL, "filtering destination should be clear");
138 *dst = entry;
139       DEBUG_ONLY(retained += 1;)
140 }
141 }
142 size_t new_index = pointer_delta(dst, buf, 1);
143
144 #ifdef ASSERT
145 size_t entries_calc = (_sz - _index) / sizeof(void*);
146 assert(entries == entries_calc, "the number of entries we counted "
147 "should match the number of entries we calculated");
148 size_t retained_calc = (_sz - new_index) / sizeof(void*);
149 assert(retained == retained_calc, "the number of retained entries we counted "
150 "should match the number of retained entries we calculated");
151 #endif // ASSERT
152
153 _index = new_index;
154 }
155
156 // This method first applies the above filtering to the buffer. If,
157 // after filtering, a large enough chunk of the buffer has been
158 // cleared, the buffer can be re-used instead of being enqueued: the
159 // mutator simply carries on executing with the same buffer rather
160 // than being given a replacement.
161
162 bool SATBMarkQueue::should_enqueue_buffer() {
163 assert(_lock == NULL || _lock->owned_by_self(),
164 "we should have taken the lock before calling this");
165
166 // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
167
168 // This method should only be called if there is a non-NULL buffer
169 // that is full.
170 assert(_index == 0, "pre-condition");
171 assert(_buf != NULL, "pre-condition");
172
173 filter();
269   for (JavaThread* t = Threads::first(); t; t = t->next()) {
270 t->satb_mark_queue().filter();
271 }
272 shared_satb_queue()->filter();
273 }
274
275 bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
276 BufferNode* nd = NULL;
277 {
278 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
279 if (_completed_buffers_head != NULL) {
280 nd = _completed_buffers_head;
281 _completed_buffers_head = nd->next();
282 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
283 _n_completed_buffers--;
284 if (_n_completed_buffers == 0) _process_completed = false;
285 }
286 }
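// The buffer node was popped from the completed list while holding _cbl_mon;
// it is processed and deallocated below without the lock held.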
287 if (nd != NULL) {
288 void **buf = BufferNode::make_buffer_from_node(nd);
289     // Skip over NULL entries at the beginning (i.e. the push end) of the buffer.
290 // Filtering can result in non-full completed buffers; see
291 // should_enqueue_buffer.
292 assert(_sz % sizeof(void*) == 0, "invariant");
293 size_t limit = SATBMarkQueue::byte_index_to_index(_sz);
294 for (size_t i = 0; i < limit; ++i) {
295 if (buf[i] != NULL) {
296 // Found the end of the block of NULLs; process the remainder.
297 cl->do_buffer(buf + i, limit - i);
298 break;
299 }
300 }
301 deallocate_buffer(buf);
302 return true;
303 } else {
304 return false;
305 }
306 }
307
308 #ifndef PRODUCT
309 // Helpful for debugging
310
311 #define SATB_PRINTER_BUFFER_SIZE 256
312
313 void SATBMarkQueueSet::print_all(const char* msg) {
314 char buffer[SATB_PRINTER_BUFFER_SIZE];
315 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
316
317 tty->cr();
318 tty->print_cr("SATB BUFFERS [%s]", msg);
319
320 BufferNode* nd = _completed_buffers_head;
321 int i = 0;
338 }
339 #endif // PRODUCT
340
341 void SATBMarkQueueSet::abandon_partial_marking() {
342 BufferNode* buffers_to_delete = NULL;
343 {
344 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
345 while (_completed_buffers_head != NULL) {
346 BufferNode* nd = _completed_buffers_head;
347 _completed_buffers_head = nd->next();
348 nd->set_next(buffers_to_delete);
349 buffers_to_delete = nd;
350 }
351 _completed_buffers_tail = NULL;
352 _n_completed_buffers = 0;
353 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
354 }
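// The completed buffers were detached from the list while holding _cbl_mon;
// they are deallocated below, after the lock has been released.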
355 while (buffers_to_delete != NULL) {
356 BufferNode* nd = buffers_to_delete;
357 buffers_to_delete = nd->next();
358 deallocate_buffer(BufferNode::make_buffer_from_node(nd));
359 }
360 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
361 // So we can safely manipulate these queues.
362 for (JavaThread* t = Threads::first(); t; t = t->next()) {
363 t->satb_mark_queue().reset();
364 }
365 shared_satb_queue()->reset();
366 }
|
83 // processing must be somewhat circumspect and not assume entries
84 // in an unfiltered buffer refer to valid objects.
85
86 inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
87 // Includes rejection of NULL pointers.
88 assert(heap->is_in_reserved(entry),
89 "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
90
91 HeapRegion* region = heap->heap_region_containing(entry);
92 assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
93 if (entry >= region->next_top_at_mark_start()) {
94 return false;
95 }
96
97 assert(((oop)entry)->is_oop(true /* ignore mark word */),
98 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry));
99
100 return true;
101 }
102
103 inline bool retain_entry(const void* entry, G1CollectedHeap* heap) {
104 return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry);
105 }
106
107 // This method removes entries from a SATB buffer that will not be
108 // useful to the concurrent marking threads. Entries are retained if
109 // they require marking and are not already marked. Retained entries
110 // are compacted toward the top of the buffer.
111
112 void SATBMarkQueue::filter() {
113 G1CollectedHeap* g1h = G1CollectedHeap::heap();
114 void** buf = _buf;
115
116 if (buf == NULL) {
117 // nothing to do
118 return;
119 }
120
121 assert(_index <= _sz, "invariant");
122
123 // Two-fingered compaction toward the end.
124 void** src = &buf[byte_index_to_index(_index)];
125 void** dst = &buf[byte_index_to_index(_sz)];
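// Loop invariant (between iterations): entries in [dst, end of buffer) have
// already been determined to be keepers; entries below src have been examined.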
126 for ( ; src < dst; ++src) {
127 // Search low to high for an entry to keep.
128 void* entry = *src;
129 if (retain_entry(entry, g1h)) {
130 // Found keeper. Search high to low for an entry to discard.
131 while ((src < --dst) && retain_entry(*dst, g1h)) { }
132 if (src >= dst) break; // Done if no discard found.
133 *dst = entry; // Replace discard with keeper.
134 }
135 }
136 assert(src == dst, "invariant");
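// dst now points at the lowest retained entry, or at the end of the buffer
// if every entry was filtered out; the new _index reflects that.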
137 _index = pointer_delta(dst, buf, 1);
138 }
139
140 // This method first applies the above filtering to the buffer. If,
141 // after filtering, a large enough chunk of the buffer has been
142 // cleared, the buffer can be re-used instead of being enqueued: the
143 // mutator simply carries on executing with the same buffer rather
144 // than being given a replacement.
145
146 bool SATBMarkQueue::should_enqueue_buffer() {
147 assert(_lock == NULL || _lock->owned_by_self(),
148 "we should have taken the lock before calling this");
149
150 // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
151
152 // This method should only be called if there is a non-NULL buffer
153 // that is full.
154 assert(_index == 0, "pre-condition");
155 assert(_buf != NULL, "pre-condition");
156
157 filter();
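// The remainder of this method is elided in this excerpt. A minimal sketch of
// the decision it makes, assuming the G1SATBBufferEnqueueingThresholdPercent
// flag mentioned above (a sketch, not the verbatim implementation):
//   size_t percent_used = ((_sz - _index) * 100) / _sz;
//   return percent_used > G1SATBBufferEnqueueingThresholdPercent;
// i.e. the buffer is enqueued only if enough entries survived filtering.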
253   for (JavaThread* t = Threads::first(); t; t = t->next()) {
254 t->satb_mark_queue().filter();
255 }
256 shared_satb_queue()->filter();
257 }
258
259 bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
260 BufferNode* nd = NULL;
261 {
262 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
263 if (_completed_buffers_head != NULL) {
264 nd = _completed_buffers_head;
265 _completed_buffers_head = nd->next();
266 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
267 _n_completed_buffers--;
268 if (_n_completed_buffers == 0) _process_completed = false;
269 }
270 }
271 if (nd != NULL) {
272 void **buf = BufferNode::make_buffer_from_node(nd);
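// The node records the queue's index at enqueue time, so only slots in
// [index, size) contain entries; filtering can leave enqueued buffers less
// than full (see should_enqueue_buffer).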
273 size_t index = SATBMarkQueue::byte_index_to_index(nd->index());
274 size_t size = SATBMarkQueue::byte_index_to_index(_sz);
275 assert(index <= size, "invariant");
276 cl->do_buffer(buf + index, size - index);
277 deallocate_buffer(nd);
278 return true;
279 } else {
280 return false;
281 }
282 }
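// Illustrative only (not part of this file): a minimal SATBBufferClosure,
// assuming only the do_buffer(void**, size_t) interface used above. It simply
// counts the entries handed to it.
class CountingSATBBufferClosure : public SATBBufferClosure {
  size_t _count;
public:
  CountingSATBBufferClosure() : _count(0) { }
  virtual void do_buffer(void** buffer, size_t size) { _count += size; }
  size_t count() const { return _count; }
};
// Hypothetical usage, with "set" standing in for a SATBMarkQueueSet*:
//   CountingSATBBufferClosure cl;
//   while (set->apply_closure_to_completed_buffer(&cl)) { }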
283
284 #ifndef PRODUCT
285 // Helpful for debugging
286
287 #define SATB_PRINTER_BUFFER_SIZE 256
288
289 void SATBMarkQueueSet::print_all(const char* msg) {
290 char buffer[SATB_PRINTER_BUFFER_SIZE];
291 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
292
293 tty->cr();
294 tty->print_cr("SATB BUFFERS [%s]", msg);
295
296 BufferNode* nd = _completed_buffers_head;
297 int i = 0;
314 }
315 #endif // PRODUCT
316
317 void SATBMarkQueueSet::abandon_partial_marking() {
318 BufferNode* buffers_to_delete = NULL;
319 {
320 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
321 while (_completed_buffers_head != NULL) {
322 BufferNode* nd = _completed_buffers_head;
323 _completed_buffers_head = nd->next();
324 nd->set_next(buffers_to_delete);
325 buffers_to_delete = nd;
326 }
327 _completed_buffers_tail = NULL;
328 _n_completed_buffers = 0;
329 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
330 }
331 while (buffers_to_delete != NULL) {
332 BufferNode* nd = buffers_to_delete;
333 buffers_to_delete = nd->next();
334 deallocate_buffer(nd);
335 }
336 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
337 // So we can safely manipulate these queues.
338 for (JavaThread* t = Threads::first(); t; t = t->next()) {
339 t->satb_mark_queue().reset();
340 }
341 shared_satb_queue()->reset();
342 }