
src/share/vm/gc/g1/satbQueue.cpp

rev 9217 : imported patch rename_debug_only
rev 9218 : imported patch fix_constructor_set_types
rev 9221 : [mq]: simplify_loops

*** 31,40 ****
--- 31,49 ----
  #include "runtime/mutexLocker.hpp"
  #include "runtime/safepoint.hpp"
  #include "runtime/thread.hpp"
  #include "runtime/vmThread.hpp"

+ ObjPtrQueue::ObjPtrQueue(SATBMarkQueueSet* qset, bool permanent) :
+   // SATB queues are only active during marking cycles. We create
+   // them with their active field set to false. If a thread is
+   // created during a cycle and its SATB queue needs to be activated
+   // before the thread starts running, we'll need to set its active
+   // field to true. This is done in JavaThread::initialize_queues().
+   PtrQueue(qset, permanent, false /* active */)
+ { }
+
  void ObjPtrQueue::flush() {
    // Filter now to possibly save work later. If filtering empties the
    // buffer then flush_impl can deallocate the buffer.
    filter();
    flush_impl();
*** 97,151 ****
  // are compacted toward the top of the buffer.

  void ObjPtrQueue::filter() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    void** buf = _buf;
-   size_t sz = _sz;

    if (buf == NULL) {
      // nothing to do
      return;
    }

    // Used for sanity checking at the end of the loop.
!   debug_only(size_t entries = 0; size_t retained = 0;)

!   size_t i = sz;
!   size_t new_index = sz;
!
!   while (i > _index) {
!     assert(i > 0, "we should have at least one more entry to process");
!     i -= oopSize;
!     debug_only(entries += 1;)
!     void** p = &buf[byte_index_to_index((int) i)];
!     void* entry = *p;
      // NULL the entry so that unused parts of the buffer contain NULLs
      // at the end. If we are going to retain it we will copy it to its
      // final place. If we have retained all entries we have visited so
      // far, we'll just end up copying it to the same place.
!     *p = NULL;

      if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
!       assert(new_index > 0, "we should not have already filled up the buffer");
!       new_index -= oopSize;
!       assert(new_index >= i,
!              "new_index should never be below i, as we always compact 'up'");
!       void** new_p = &buf[byte_index_to_index((int) new_index)];
!       assert(new_p >= p, "the destination location should never be below "
!              "the source as we always compact 'up'");
!       assert(*new_p == NULL,
!              "we should have already cleared the destination location");
!       *new_p = entry;
!       debug_only(retained += 1;)
      }
    }

  #ifdef ASSERT
!   size_t entries_calc = (sz - _index) / oopSize;
    assert(entries == entries_calc, "the number of entries we counted "
           "should match the number of entries we calculated");
!   size_t retained_calc = (sz - new_index) / oopSize;
    assert(retained == retained_calc, "the number of retained entries we counted "
           "should match the number of retained entries we calculated");
  #endif // ASSERT

    _index = new_index;
--- 106,153 ----
  // are compacted toward the top of the buffer.

  void ObjPtrQueue::filter() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    void** buf = _buf;

    if (buf == NULL) {
      // nothing to do
      return;
    }

    // Used for sanity checking at the end of the loop.
!   DEBUG_ONLY(size_t entries = 0; size_t retained = 0;)

!   assert(_index <= _sz, "invariant");
!   void** limit = &buf[byte_index_to_index(_index)];
!   void** src = &buf[byte_index_to_index(_sz)];
!   void** dst = src;
!
!   while (limit < src) {
!     DEBUG_ONLY(entries += 1;)
!     --src;
!     void* entry = *src;
      // NULL the entry so that unused parts of the buffer contain NULLs
      // at the end. If we are going to retain it we will copy it to its
      // final place. If we have retained all entries we have visited so
      // far, we'll just end up copying it to the same place.
!     *src = NULL;

      if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
!       --dst;
!       assert(*dst == NULL, "filtering destination should be clear");
!       *dst = entry;
!       DEBUG_ONLY(retained += 1;);
      }
    }
+   size_t new_index = pointer_delta(dst, buf, 1);

  #ifdef ASSERT
!   size_t entries_calc = (_sz - _index) / sizeof(void*);
    assert(entries == entries_calc, "the number of entries we counted "
           "should match the number of entries we calculated");
!   size_t retained_calc = (_sz - new_index) / sizeof(void*);
    assert(retained == retained_calc, "the number of retained entries we counted "
           "should match the number of retained entries we calculated");
  #endif // ASSERT

    _index = new_index;
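
For reference, a minimal standalone sketch (not taken from the HotSpot sources) of the src/dst compaction pattern that the reworked filter() loop uses. It works in element units rather than the byte offsets stored in _index and _sz, and keep_entry() is a hypothetical stand-in for the requires_marking / isMarkedNext test:

#include <cstddef>

// Hypothetical stand-in for the SATB filtering predicate.
static bool keep_entry(void* entry) {
  return entry != nullptr;
}

// Walk the used region [index, size) from the top down, NULL every slot,
// and copy retained entries back toward the high end of the buffer.
// Returns the new start index of the live entries (in element units).
static size_t compact_upward(void** buf, size_t index, size_t size) {
  void** limit = buf + index;   // lowest used slot; the buffer fills downward
  void** src   = buf + size;    // one past the highest slot
  void** dst   = src;

  while (limit < src) {
    void* entry = *--src;       // visit each entry exactly once
    *src = nullptr;             // leave NULLs behind in vacated slots
    if (keep_entry(entry)) {
      *--dst = entry;           // dst never passes src, so copies cannot clobber unread entries
    }
  }
  return static_cast<size_t>(dst - buf);
}

Because dst starts at the top of the buffer and trails src, each retained entry is copied at most once and every vacated slot ends up NULL, which is what later lets consumers skip a leading run of NULLs.
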
*** 169,180 ****
    assert(_buf != NULL, "pre-condition");

    filter();

    size_t sz = _sz;
!   size_t all_entries = sz / oopSize;
!   size_t retained_entries = (sz - _index) / oopSize;
    size_t perc = retained_entries * 100 / all_entries;
    bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
    return should_enqueue;
  }
--- 171,182 ----
    assert(_buf != NULL, "pre-condition");

    filter();

    size_t sz = _sz;
!   size_t all_entries = sz / sizeof(void*);
!   size_t retained_entries = (sz - _index) / sizeof(void*);
    size_t perc = retained_entries * 100 / all_entries;
    bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
    return should_enqueue;
  }
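
The enqueueing decision above is plain integer percentage arithmetic on the filtered buffer. A hedged sketch, with local parameters standing in for the queue fields and for G1SATBBufferEnqueueingThresholdPercent:

#include <cstddef>

// sz and index are byte quantities, as in the queue: sz is the buffer
// capacity and index is the byte offset of the first retained entry
// after filtering. Assumes sz > 0, as the caller guarantees a buffer.
static bool should_enqueue_after_filter(size_t sz, size_t index,
                                        size_t threshold_percent) {
  size_t all_entries      = sz / sizeof(void*);
  size_t retained_entries = (sz - index) / sizeof(void*);
  size_t perc = retained_entries * 100 / all_entries;
  return perc > threshold_percent;
}

For example, a 1 KB buffer holds 128 pointer entries on a 64-bit VM; if filtering retains 32 of them the buffer is 25% full, so with a threshold of, say, 60 it would be discarded rather than enqueued.
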
*** 183,194 ****
           "SATB queues must only be processed at safepoints");
    if (_buf != NULL) {
      assert(_index % sizeof(void*) == 0, "invariant");
      assert(_sz % sizeof(void*) == 0, "invariant");
      assert(_index <= _sz, "invariant");
!     cl->do_buffer(_buf + byte_index_to_index((int)_index),
!                   byte_index_to_index((int)(_sz - _index)));
      _index = _sz;
    }
  }

  #ifndef PRODUCT
--- 185,196 ----
           "SATB queues must only be processed at safepoints");
    if (_buf != NULL) {
      assert(_index % sizeof(void*) == 0, "invariant");
      assert(_sz % sizeof(void*) == 0, "invariant");
      assert(_index <= _sz, "invariant");
!     cl->do_buffer(_buf + byte_index_to_index(_index),
!                   byte_index_to_index(_sz - _index));
      _index = _sz;
    }
  }

  #ifndef PRODUCT
*** 297,307 ****
    void **buf = BufferNode::make_buffer_from_node(nd);
    // Skip over NULL entries at beginning (e.g. push end) of buffer.
    // Filtering can result in non-full completed buffers; see
    // should_enqueue_buffer.
    assert(_sz % sizeof(void*) == 0, "invariant");
!   size_t limit = ObjPtrQueue::byte_index_to_index((int)_sz);
    for (size_t i = 0; i < limit; ++i) {
      if (buf[i] != NULL) {
        // Found the end of the block of NULLs; process the remainder.
        cl->do_buffer(buf + i, limit - i);
        break;
--- 299,309 ----
    void **buf = BufferNode::make_buffer_from_node(nd);
    // Skip over NULL entries at beginning (e.g. push end) of buffer.
    // Filtering can result in non-full completed buffers; see
    // should_enqueue_buffer.
    assert(_sz % sizeof(void*) == 0, "invariant");
!   size_t limit = ObjPtrQueue::byte_index_to_index(_sz);
    for (size_t i = 0; i < limit; ++i) {
      if (buf[i] != NULL) {
        // Found the end of the block of NULLs; process the remainder.
        cl->do_buffer(buf + i, limit - i);
        break;
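
A standalone sketch of the scan above, using an assumed function-pointer closure type rather than the SATBBufferClosure interface: filtered buffers can start with a run of NULLs, so the loop finds the first non-NULL entry and hands the remaining tail to the closure in a single call.

#include <cstddef>

typedef void (*BufferClosureFn)(void** entries, size_t count);

static void apply_skipping_leading_nulls(void** buf, size_t limit,
                                         BufferClosureFn cl) {
  for (size_t i = 0; i < limit; ++i) {
    if (buf[i] != NULL) {
      // Found the end of the block of NULLs; process the remainder.
      cl(buf + i, limit - i);
      break;
    }
  }
  // If the loop falls through, the buffer was entirely NULL and there
  // is nothing to process.
}
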