
src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp

rev 49680 : imported patch 6672778-partial-queue-trimming
rev 49681 : [mq]: 6672778-refactoring

*** 27,36 **** --- 27,37 ----
  #include "gc/g1/g1ParScanThreadState.hpp"
  #include "gc/g1/g1RemSet.hpp"
  #include "oops/access.inline.hpp"
  #include "oops/oop.inline.hpp"
+ #include "utilities/ticks.inline.hpp"
  
  template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
    // Reference should not be NULL here as such are never pushed to the task queue.
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
*** 149,154 **** --- 150,193 ----
      // we drain the queues as necessary.
      trim_queue();
    }
  }
+ 
+ inline bool G1ParScanThreadState::should_start_trim_queue_partially() const {
+   return !_refs->overflow_empty() || _refs->size() > _stack_drain_upper_threshold;
+ }
+ 
+ inline bool G1ParScanThreadState::should_end_trim_queue_partially() const {
+   return _refs->overflow_empty() && _refs->size() <= _stack_drain_lower_threshold;
+ }
+ 
+ inline void G1ParScanThreadState::trim_queue_partially_internal() {
+   StarTask ref;
+   do {
+     // Drain the overflow stack first, so other threads can potentially steal.
+     while (_refs->pop_overflow(ref)) {
+       if (!_refs->try_push_to_taskqueue(ref)) {
+         dispatch_reference(ref);
+       }
+     }
+ 
+     while (_refs->pop_local(ref, _stack_drain_lower_threshold)) {
+       dispatch_reference(ref);
+     }
+   } while (!should_end_trim_queue_partially());
+ }
+ 
+ inline void G1ParScanThreadState::trim_queue_partially() {
+   if (should_start_trim_queue_partially()) {
+     const Ticks start = Ticks::now();
+     trim_queue_partially_internal();
+     _trim_ticks += Ticks::now() - start;
+   }
+ }
+ 
+ inline Tickspan G1ParScanThreadState::trim_ticks_and_reset() {
+   Tickspan result = _trim_ticks;
+   _trim_ticks = Tickspan();
+   return result;
+ }
  
  #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
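Reviewer note, for context (not part of the webrev): the added code is a hysteresis scheme. Trimming starts only once the local queue grows past _stack_drain_upper_threshold (or the overflow stack is non-empty), and then drains only down to _stack_drain_lower_threshold, so the thread neither trims on every push nor oscillates around a single boundary; the time spent trimming is accumulated in _trim_ticks and handed out once via trim_ticks_and_reset(). The following is a minimal standalone C++ sketch of that pattern only. Every name in it (SimpleQueue, process, take_trim_time) is a hypothetical illustration, not a HotSpot API, and it deliberately omits the overflow stack handling.

  #include <chrono>
  #include <cstdio>
  #include <deque>

  class SimpleQueue {
    std::deque<int> _tasks;
    const size_t _upper;                      // start trimming above this size
    const size_t _lower;                      // stop trimming at or below this size
    std::chrono::nanoseconds _trim_time{0};   // analogous to _trim_ticks

    void process(int task) { (void)task; }   // stand-in for dispatch_reference()

  public:
    SimpleQueue(size_t upper, size_t lower) : _upper(upper), _lower(lower) {}

    void push(int task) {
      _tasks.push_back(task);
      trim_partially();   // like trim_queue_partially(): a cheap check on every push
    }

    void trim_partially() {
      if (_tasks.size() <= _upper) {
        return;           // below the start threshold: nothing to do
      }
      const auto start = std::chrono::steady_clock::now();
      while (_tasks.size() > _lower) {        // drain down to the lower threshold only
        int task = _tasks.front();
        _tasks.pop_front();
        process(task);
      }
      _trim_time += std::chrono::duration_cast<std::chrono::nanoseconds>(
          std::chrono::steady_clock::now() - start);
    }

    // Mirrors trim_ticks_and_reset(): report the accumulated time once, then reset.
    std::chrono::nanoseconds take_trim_time() {
      const auto result = _trim_time;
      _trim_time = std::chrono::nanoseconds{0};
      return result;
    }
  };

  int main() {
    SimpleQueue q(/*upper=*/8, /*lower=*/4);
    for (int i = 0; i < 100; i++) {
      q.push(i);
    }
    std::printf("trim time: %lld ns\n", (long long)q.take_trim_time().count());
    return 0;
  }

One difference worth noting against the patch above: in the real code, dispatch_reference() can itself push new work onto the queues, which is why trim_queue_partially_internal() loops with do/while until should_end_trim_queue_partially() holds; the sketch's process() pushes nothing, so a single drain pass suffices.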