< prev index next > src/hotspot/share/gc/z/zMarkStack.inline.hpp
Print this page
decode_versioned_pointer(vstack, &stack, &version);
return stack == NULL;
}
template <typename T>
- inline void ZStackList<T>::push_atomic(T* stack) {
+ inline void ZStackList<T>::push(T* stack) {
T* vstack = _head;
uint32_t version = 0;
for (;;) {
decode_versioned_pointer(vstack, stack->next_addr(), &version);
vstack = prev_vstack;
}
}
template <typename T>
- inline T* ZStackList<T>::pop_atomic() {
+ inline T* ZStackList<T>::pop() {
T* vstack = _head;
T* stack = NULL;
uint32_t version = 0;
for (;;) {
// workers to work on, while the overflowed list is used by GC workers
// to publish stacks that overflowed. The intention here is to avoid
// contention between mutators and GC workers as much as possible, while
// still allowing GC workers to help out and steal work from each other.
if (publish) {
- _published.push_atomic(stack);
+ _published.push(stack);
} else {
- _overflowed.push_atomic(stack);
+ _overflowed.push(stack);
}
}
inline ZMarkStack* ZMarkStripe::steal_stack() {
  // Steal overflowed stacks first, then published stacks. Overflowed
  // stacks are preferred since they were produced by GC workers, so
  // draining them first reduces contention with mutator publishing.
  ZMarkStack* const stack = _overflowed.pop();
  if (stack != NULL) {
    return stack;
  }

  // Nothing overflowed; fall back to the published list (may be NULL).
  return _published.pop();
}
// Returns the number of stripes in this stripe set.
inline size_t ZMarkStripeSet::nstripes() const {
  return _nstripes;
}
< prev index next >