src/hotspot/share/gc/z/zMarkStack.inline.hpp

The only change in this hunk is the argument order of Atomic::cmpxchg in
ZStackList<T>::push() and ZStackList<T>::pop(): the call moves from the old
(exchange_value, dest, compare_value) order to (dest, compare_value,
exchange_value):

-    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
+    T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);

With that change applied, the code reads:
}

template <typename T>
inline bool ZStackList<T>::is_empty() const {
  const T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  decode_versioned_pointer(vstack, &stack, &version);
  return stack == NULL;
}

template <typename T>
inline void ZStackList<T>::push(T* stack) {
  T* vstack = _head;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, stack->next_addr(), &version);
    T* const new_vstack = encode_versioned_pointer(stack, version + 1);
    T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
    if (prev_vstack == vstack) {
      // Success
      break;
    }

    // Retry
    vstack = prev_vstack;
  }
}
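Note that _head is not a plain pointer: encode_versioned_pointer() and
decode_versioned_pointer() (declared earlier in this file, outside this
hunk) pack the stack pointer together with a version counter that is bumped
on every successful cmpxchg. A minimal self-contained sketch of one possible
encoding, assuming a 64-bit platform and that all stacks live in a single
contiguous space so a 32-bit offset suffices; the constant and bit layout
below are illustrative, not ZGC's actual ones:

#include <cstddef>
#include <cstdint>

// Hypothetical base address of the contiguous stack space.
static const uintptr_t stack_space_start = 0x100000000000;

template <typename T>
T* encode_versioned_pointer(const T* stack, uint32_t version) {
  uint64_t offset;
  if (stack == NULL) {
    offset = (uint32_t)-1;  // reserved offset encodes "empty list"
  } else {
    offset = (uint64_t)((uintptr_t)stack - stack_space_start);
  }
  // Upper 32 bits: offset into the stack space. Lower 32 bits: version.
  return (T*)((offset << 32) | (uint64_t)version);
}

template <typename T>
void decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) {
  const uint64_t offset = (uint64_t)(uintptr_t)vstack >> 32;
  *stack = (offset == (uint32_t)-1) ? NULL : (T*)(stack_space_start + offset);
  *version = (uint32_t)(uintptr_t)vstack;
}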
template <typename T>
inline T* ZStackList<T>::pop() {
  T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, &stack, &version);
    if (stack == NULL) {
      return NULL;
    }

    T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
    T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
    if (prev_vstack == vstack) {
      // Success
      return stack;
    }

    // Retry
    vstack = prev_vstack;
  }
}
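The version is what protects pop() from the ABA problem: between reading
stack->next() and the cmpxchg, another thread can pop the same stack, push
other stacks, and push this stack back, leaving the head pointer
bit-identical while its next link has changed. Because every successful push
and pop bumps the version, the stale cmpxchg fails and the loop retries. A
self-contained sketch of the same pattern using std::atomic; the index-based
node pool and all names are ours, not ZGC's:

#include <atomic>
#include <cstdint>

// Head packs a 32-bit node index (UINT32_MAX = empty) in the upper
// half and a version counter in the lower half.
struct Node { int value; uint32_t next; };
static Node pool[1024];
static std::atomic<uint64_t> head{(uint64_t)UINT32_MAX << 32};

static uint64_t pack(uint32_t index, uint32_t version) {
  return ((uint64_t)index << 32) | version;
}

void push_node(uint32_t index) {
  uint64_t old_head = head.load();
  for (;;) {
    pool[index].next = (uint32_t)(old_head >> 32);
    // Bump the version on every successful exchange.
    if (head.compare_exchange_weak(old_head, pack(index, (uint32_t)old_head + 1))) {
      return;  // on failure, compare_exchange_weak reloads old_head
    }
  }
}

uint32_t pop_node() {
  uint64_t old_head = head.load();
  for (;;) {
    const uint32_t index = (uint32_t)(old_head >> 32);
    if (index == UINT32_MAX) {
      return UINT32_MAX;  // empty
    }
    // next may be stale if another thread raced us; the versioned CAS
    // below rejects exactly that case (simplified: a rigorous version
    // would make next a std::atomic<uint32_t>).
    const uint64_t new_head = pack(pool[index].next, (uint32_t)old_head + 1);
    if (head.compare_exchange_weak(old_head, new_head)) {
      return index;
    }
  }
}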
inline bool ZMarkStripe::is_empty() const {
  return _published.is_empty() && _overflowed.is_empty();
}

inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) {
  // A stack is published either on the published list or the overflowed
  // list. The published list is used by mutators publishing stacks for GC
  // workers to work on, while the overflowed list is used by GC workers
  // to publish stacks that overflowed. The intention here is to avoid
  // contention between mutators and GC workers as much as possible, while
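The comment and the function body are cut off at the page boundary here.
For orientation, a minimal sketch of the routing the comment describes;
this is a guess at the shape of the body, not the actual code:

inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) {
  // Route to the list the comment above describes for each publisher.
  ZStackList<ZMarkStack>* const list = publish ? &_published : &_overflowed;
  list->push(stack);
}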

