
src/share/vm/gc/shared/satbMarkQueue.cpp


*** 22,32 ****
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
! #include "gc/g1/satbMarkQueue.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "memory/allocation.inline.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "runtime/safepoint.hpp"
--- 22,32 ----
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
! #include "gc/shared/satbMarkQueue.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "memory/allocation.inline.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "runtime/safepoint.hpp"
*** 40,60 ****
    // before the thread starts running, we'll need to set its active
    // field to true. This is done in JavaThread::initialize_queues().
    PtrQueue(qset, permanent, false /* active */)
  { }
  
  void SATBMarkQueue::flush() {
    // Filter now to possibly save work later.  If filtering empties the
    // buffer then flush_impl can deallocate the buffer.
    filter();
    flush_impl();
  }
  
  // Return true if a SATB buffer entry refers to an object that
  // requires marking.
  //
! // The entry must point into the G1 heap.  In particular, it must not
  // be a NULL pointer.  NULL pointers are pre-filtered and never
  // inserted into a SATB buffer.
  //
  // An entry that is below the NTAMS pointer for the containing heap
  // region requires marking. Such an entry must point to a valid object.
--- 40,75 ----
    // before the thread starts running, we'll need to set its active
    // field to true. This is done in JavaThread::initialize_queues().
    PtrQueue(qset, permanent, false /* active */)
  { }
  
+ void SATBMarkQueue::enqueue(oop pre_val) {
+   // Nulls should have been already filtered.
+   assert(pre_val->is_oop(true), "Error");
+ 
+   if (!JavaThread::satb_mark_queue_set().is_active()) return;
+   Thread* thr = Thread::current();
+   if (thr->is_Java_thread()) {
+     JavaThread* jt = (JavaThread*)thr;
+     jt->satb_mark_queue().enqueue(pre_val);
+   } else {
+     MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
+     JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
+   }
+ }
+ 
  void SATBMarkQueue::flush() {
    // Filter now to possibly save work later.  If filtering empties the
    // buffer then flush_impl can deallocate the buffer.
    filter();
    flush_impl();
  }
  
  // Return true if a SATB buffer entry refers to an object that
  // requires marking.
  //
! // The entry must point into the heap.  In particular, it must not
  // be a NULL pointer.  NULL pointers are pre-filtered and never
  // inserted into a SATB buffer.
  //
  // An entry that is below the NTAMS pointer for the containing heap
  // region requires marking. Such an entry must point to a valid object.
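
The block added above gives SATBMarkQueue a GC-agnostic enqueue entry point: Java threads push onto their thread-local SATB queue, while other threads take Shared_SATB_Q_lock and use the shared queue. For context, a pre-write barrier caller would look roughly like the sketch below; the helper name is illustrative only, and the call assumes the new enqueue is declared static in satbMarkQueue.hpp (the header is not part of this page).

    // Sketch only -- not part of this change. Shows how a SATB pre-write
    // barrier would feed the previous field value into the new entry point.
    template <class T>
    inline void example_write_ref_field_pre(T* field) {   // hypothetical helper
      T heap_oop = oopDesc::load_heap_oop(field);
      if (!oopDesc::is_null(heap_oop)) {
        // Non-NULL previous value: record it for SATB marking.
        SATBMarkQueue::enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
      }
    }
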
*** 81,114 ****
  // The stale reference cases are implicitly handled by the NTAMS
  // comparison.  Because of the possibility of stale references, buffer
  // processing must be somewhat circumspect and not assume entries
  // in an unfiltered buffer refer to valid objects.
  
! inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
!   // Includes rejection of NULL pointers.
!   assert(heap->is_in_reserved(entry),
!          "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
! 
!   HeapRegion* region = heap->heap_region_containing(entry);
!   assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
!   if (entry >= region->next_top_at_mark_start()) {
!     return false;
!   }
! 
!   assert(((oop)entry)->is_oop(true /* ignore mark word */),
!          "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry));
! 
!   return true;
  }
  
  // This method removes entries from a SATB buffer that will not be
  // useful to the concurrent marking threads.  Entries are retained if
  // they require marking and are not already marked.  Retained entries
  // are compacted toward the top of the buffer.
  
  void SATBMarkQueue::filter() {
!   G1CollectedHeap* g1h = G1CollectedHeap::heap();
    void** buf = _buf;
  
    if (buf == NULL) {
      // nothing to do
      return;
--- 96,126 ----
  // The stale reference cases are implicitly handled by the NTAMS
  // comparison.  Because of the possibility of stale references, buffer
  // processing must be somewhat circumspect and not assume entries
  // in an unfiltered buffer refer to valid objects.
  
! template <class HeapType>
! inline bool requires_marking(const void* entry, HeapType* heap) {
!   return heap->requires_marking(entry);
  }
  
  // This method removes entries from a SATB buffer that will not be
  // useful to the concurrent marking threads.  Entries are retained if
  // they require marking and are not already marked.  Retained entries
  // are compacted toward the top of the buffer.
  
  void SATBMarkQueue::filter() {
!   if (UseG1GC) {
!     filter_impl<G1CollectedHeap>();
!   } else {
!     ShouldNotReachHere();
!   }
! }
! 
! template <class HeapType>
! void SATBMarkQueue::filter_impl() {
!   HeapType* heap = (HeapType*) Universe::heap();
    void** buf = _buf;
  
    if (buf == NULL) {
      // nothing to do
      return;
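
With requires_marking() now templated on the heap type, the G1-specific knowledge (heap membership, NTAMS, and the already-marked test previously done via isMarkedNext() in the filter loop below) presumably moves behind a G1CollectedHeap::requires_marking(entry) member that is not shown in this webrev. A sketch of what that hook might look like, reconstructed from the code deleted above:

    // Sketch only -- assumed shape of the new heap hook, based on the
    // deleted inline requires_marking() plus the isMarkedNext() test that
    // disappears from the filter loop in the next hunk.
    bool G1CollectedHeap::requires_marking(const void* entry) {
      // Includes rejection of NULL pointers.
      assert(is_in_reserved(entry),
             "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));

      HeapRegion* region = heap_region_containing(entry);
      assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
      if (entry >= region->next_top_at_mark_start()) {
        return false;
      }

      assert(((oop)entry)->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry));

      // Entries that are already marked need no further work.
      return !isMarkedNext((oop)entry);
    }

Dispatching through a template instantiation selected by UseG1GC in filter(), rather than a virtual call per entry, keeps the per-entry filtering cost flat.
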
*** 130,140 ****
      // at the end. If we are going to retain it we will copy it to its
      // final place. If we have retained all entries we have visited so
      // far, we'll just end up copying it to the same place.
      *src = NULL;
  
!     if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
        --dst;
        assert(*dst == NULL, "filtering destination should be clear");
        *dst = entry;
        DEBUG_ONLY(retained += 1;);
      }
--- 142,152 ----
      // at the end. If we are going to retain it we will copy it to its
      // final place. If we have retained all entries we have visited so
      // far, we'll just end up copying it to the same place.
      *src = NULL;
  
!     if (requires_marking(entry, heap)) {
        --dst;
        assert(*dst == NULL, "filtering destination should be clear");
        *dst = entry;
        DEBUG_ONLY(retained += 1;);
      }
*** 161,181 ****
  
  bool SATBMarkQueue::should_enqueue_buffer() {
    assert(_lock == NULL || _lock->owned_by_self(),
           "we should have taken the lock before calling this");
  
!   // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
  
    // This method should only be called if there is a non-NULL buffer
    // that is full.
    assert(_index == 0, "pre-condition");
    assert(_buf != NULL, "pre-condition");
  
    filter();
  
    size_t percent_used = ((_sz - _index) * 100) / _sz;
!   bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
    return should_enqueue;
  }
  
  void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
    assert(SafepointSynchronize::is_at_safepoint(),
--- 173,193 ----
  
  bool SATBMarkQueue::should_enqueue_buffer() {
    assert(_lock == NULL || _lock->owned_by_self(),
           "we should have taken the lock before calling this");
  
!   // If SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
  
    // This method should only be called if there is a non-NULL buffer
    // that is full.
    assert(_index == 0, "pre-condition");
    assert(_buf != NULL, "pre-condition");
  
    filter();
  
    size_t percent_used = ((_sz - _index) * 100) / _sz;
!   bool should_enqueue = percent_used > SATBBufferEnqueueingThresholdPercent;
    return should_enqueue;
  }
  
  void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
    assert(SafepointSynchronize::is_at_safepoint(),
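
The only change in this hunk is renaming the threshold flag to its GC-agnostic form. As a worked example of the decision it controls (buffer size and the default threshold of 60 are assumed, not taken from this page):

    // Worked example, illustrative numbers only. _index is the byte offset
    // of the first retained entry; retained entries sit at the top of the buffer.
    size_t sz    = 1024 * sizeof(void*);   // hypothetical buffer capacity
    size_t index =  870 * sizeof(void*);   // 154 of 1024 entries survived filter()
    size_t percent_used = ((sz - index) * 100) / sz;   // == 15
    // Assuming the renamed flag keeps a default of 60, 15 > 60 is false, so the
    // buffer is handed back to the mutator for re-use instead of being enqueued.
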