diff a/src/hotspot/share/gc/z/zMarkStack.inline.hpp b/src/hotspot/share/gc/z/zMarkStack.inline.hpp
--- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp
+++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp
@@ -112,11 +112,11 @@
   decode_versioned_pointer(vstack, &stack, &version);
   return stack == NULL;
 }

 template <typename T>
-inline void ZStackList<T>::push_atomic(T* stack) {
+inline void ZStackList<T>::push(T* stack) {
   T* vstack = _head;
   uint32_t version = 0;

   for (;;) {
     decode_versioned_pointer(vstack, stack->next_addr(), &version);
@@ -131,11 +131,11 @@
     vstack = prev_vstack;
   }
 }

 template <typename T>
-inline T* ZStackList<T>::pop_atomic() {
+inline T* ZStackList<T>::pop() {
   T* vstack = _head;
   T* stack = NULL;
   uint32_t version = 0;

   for (;;) {
@@ -166,24 +166,24 @@
   // workers to work on, while the overflowed list is used by GC workers
   // to publish stacks that overflowed. The intention here is to avoid
   // contention between mutators and GC workers as much as possible, while
   // still allowing GC workers to help out and steal work from each other.
   if (publish) {
-    _published.push_atomic(stack);
+    _published.push(stack);
   } else {
-    _overflowed.push_atomic(stack);
+    _overflowed.push(stack);
   }
 }

 inline ZMarkStack* ZMarkStripe::steal_stack() {
   // Steal overflowed stacks first, then published stacks
-  ZMarkStack* const stack = _overflowed.pop_atomic();
+  ZMarkStack* const stack = _overflowed.pop();
   if (stack != NULL) {
     return stack;
   }

-  return _published.pop_atomic();
+  return _published.pop();
 }

 inline size_t ZMarkStripeSet::nstripes() const {
   return _nstripes;
 }
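
Note for readers: the renamed push()/pop() bodies above retry a decode_versioned_pointer() plus compare-and-swap loop on _head. The standalone C++ sketch below is not the ZGC code; Node, VersionedStack, kVersionMask and the 4096-byte alignment are made-up illustration. It shows the same versioned-pointer idea the hunks suggest: a small version counter is packed into the low bits of an aligned head pointer and bumped on every successful CAS, so a popped-and-repushed node with the same address cannot be mistaken for an unchanged head (the classic ABA problem).

#include <atomic>
#include <cstdint>

struct alignas(4096) Node {          // alignment frees the low 12 bits of the address
  Node* next = nullptr;
};

class VersionedStack {
 private:
  static const uintptr_t kVersionMask = 4096 - 1;
  std::atomic<uintptr_t> _head{0};

  static Node* decode(uintptr_t v)      { return reinterpret_cast<Node*>(v & ~kVersionMask); }
  static uintptr_t version(uintptr_t v) { return v & kVersionMask; }
  static uintptr_t encode(Node* n, uintptr_t ver) {
    return reinterpret_cast<uintptr_t>(n) | (ver & kVersionMask);
  }

 public:
  void push(Node* node) {
    uintptr_t old_head = _head.load();
    for (;;) {
      // Link the new node in front of the current head, bump the version
      node->next = decode(old_head);
      const uintptr_t new_head = encode(node, version(old_head) + 1);
      // On failure, compare_exchange_weak refreshes old_head and we retry
      if (_head.compare_exchange_weak(old_head, new_head)) {
        break;
      }
    }
  }

  Node* pop() {
    uintptr_t old_head = _head.load();
    for (;;) {
      Node* const node = decode(old_head);
      if (node == nullptr) {
        return nullptr;       // Stack is empty
      }
      // Unlink the head node, again bumping the version to defeat ABA
      const uintptr_t new_head = encode(node->next, version(old_head) + 1);
      if (_head.compare_exchange_weak(old_head, new_head)) {
        return node;
      }
    }
  }
};

Packing the version into alignment bits is presumably what lets the real ZStackList get away with a single-word CAS on _head instead of a double-width compare-and-swap; the sketch mirrors that trade-off, at the cost of a counter that wraps after 4096 updates.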