src/hotspot/share/gc/z/zMarkStack.inline.hpp

template <typename T>
inline void ZStackList<T>::decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const {
  const uint64_t addr = (uint64_t)vstack >> 32;

  if (addr == (uint32_t)-1) {
    *stack = NULL;
  } else {
    *stack = (T*)((addr << ZMarkStackSizeShift) + ZMarkStackSpaceStart);
  }

  *version = (uint32_t)(uint64_t)vstack;
}
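
// The versioned pointer decoded above packs two fields into one 64-bit word:
// the upper 32 bits hold the stack's index within the contiguous mark stack
// space (all ones meaning NULL), and the lower 32 bits hold a version that is
// bumped on every list update. A minimal standalone sketch of the round trip,
// with made-up values for the base and shift (the real ZMarkStackSpaceStart/
// ZMarkStackSizeShift are defined elsewhere):

static const uint64_t sketch_space_start = 0x0000100000000000ull; // hypothetical space base
static const int      sketch_size_shift  = 18;                    // hypothetical stack size shift

static uint64_t sketch_encode(uint64_t stack_addr, uint32_t version) {
  const uint64_t index = (stack_addr == 0)
      ? (uint64_t)(uint32_t)-1                         // NULL -> all-ones index
      : (stack_addr - sketch_space_start) >> sketch_size_shift;
  return (index << 32) | version;                      // [index:32][version:32]
}

static void sketch_decode(uint64_t vstack, uint64_t* stack_addr, uint32_t* version) {
  const uint64_t index = vstack >> 32;
  *stack_addr = (index == (uint32_t)-1) ? 0 : (index << sketch_size_shift) + sketch_space_start;
  *version = (uint32_t)vstack;                         // low half is the version
}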

template <typename T>
inline bool ZStackList<T>::is_empty() const {
  const T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  decode_versioned_pointer(vstack, &stack, &version);
  return stack == NULL;
}

template <typename T>
inline void ZStackList<T>::push(T* stack) {
  T* vstack = _head;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, stack->next_addr(), &version);
    T* const new_vstack = encode_versioned_pointer(stack, version + 1);
    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
    if (prev_vstack == vstack) {
      // Success
      break;
    }

    // Retry
    vstack = prev_vstack;
  }
}

template <typename T>
inline T* ZStackList<T>::pop() {
  T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, &stack, &version);
    if (stack == NULL) {
      return NULL;
    }

    T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
    if (prev_vstack == vstack) {
      // Success
      return stack;
    }

    // Retry
    vstack = prev_vstack;
  }
}
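
// Why the version half matters: with a bare head pointer, a pop() could read
// head == A, stall while other threads pop A and push it back with a different
// successor, and then succeed in a cmpxchg against the stale A (the classic
// ABA problem), corrupting the list. Because push() and pop() bump the version
// on every update, the stale cmpxchg fails and the loop retries. A minimal
// generic sketch of the same idea using std::atomic (hypothetical, not the
// HotSpot Atomic API); ZGC packs the version into the same 64-bit word so a
// single-word cmpxchg suffices, while this sketch uses a two-word atomic for
// clarity:

#include <atomic>

struct SketchNode {
  SketchNode* next;
};

struct SketchHead {
  SketchNode* node;
  uint64_t    version;
};

class SketchStack {
private:
  std::atomic<SketchHead> _head;

public:
  SketchStack() : _head(SketchHead{NULL, 0}) {}

  void push(SketchNode* n) {
    SketchHead old_head = _head.load();
    SketchHead new_head;
    do {
      n->next = old_head.node;                      // link below the current top
      new_head = SketchHead{n, old_head.version + 1};
    } while (!_head.compare_exchange_weak(old_head, new_head));
  }

  SketchNode* pop() {
    SketchHead old_head = _head.load();
    SketchHead new_head;
    do {
      if (old_head.node == NULL) {
        return NULL;                                // empty
      }
      // Safe to dereference here only because, as in ZGC, nodes are assumed
      // to stay allocated for the lifetime of the stack.
      new_head = SketchHead{old_head.node->next, old_head.version + 1};
    } while (!_head.compare_exchange_weak(old_head, new_head));
    return old_head.node;
  }
};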

inline bool ZMarkStripe::is_empty() const {
  return _published.is_empty() && _overflowed.is_empty();
}

inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) {
  // A stack is published either on the published list or the overflowed
  // list. The published list is used by mutators publishing stacks for GC
  // workers to work on, while the overflowed list is used by GC workers
  // to publish stacks that overflowed. The intention here is to avoid
  // contention between mutators and GC workers as much as possible, while
  // still allowing GC workers to help out and steal work from each other.
  if (publish) {
    _published.push(stack);
  } else {
    _overflowed.push(stack);
  }
}
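
// Hypothetical call sites for the two cases described above: a mutator hands
// a full stack over for GC workers to drain, while a GC worker parks a stack
// that overflowed during marking:
//
//   stripe->publish_stack(stack, true /* publish */);   // mutator
//   stripe->publish_stack(stack, false /* publish */);  // GC worker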

inline ZMarkStack* ZMarkStripe::steal_stack() {
  // Steal overflowed stacks first, then published stacks
  ZMarkStack* const stack = _overflowed.pop();
  if (stack != NULL) {
    return stack;
  }

  return _published.pop();
}

inline size_t ZMarkStripeSet::nstripes() const {
  return _nstripes;
}

inline size_t ZMarkStripeSet::stripe_id(const ZMarkStripe* stripe) const {
  const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(ZMarkStripe);
  assert(index < _nstripes, "Invalid index");
  return index;
}
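
// Worked example of the pointer arithmetic above, with hypothetical numbers:
// if _stripes == 0x1000 and sizeof(ZMarkStripe) == 0x100, a stripe at 0x1300
// maps to index (0x1300 - 0x1000) / 0x100 == 3.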

inline ZMarkStripe* ZMarkStripeSet::stripe_at(size_t index) {
  assert(index < _nstripes, "Invalid index");
  return &_stripes[index];
}

inline ZMarkStripe* ZMarkStripeSet::stripe_next(ZMarkStripe* stripe) {
  const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask;
  assert(index < _nstripes, "Invalid index");
  return &_stripes[index];
}
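
// Because the stripe count is a power of two, the wrap-around above reduces
// to a mask. A hypothetical helper (not part of this file) that uses
// stripe_next() to visit every other stripe once when stealing work:

inline ZMarkStack* sketch_steal_from_others(ZMarkStripeSet* stripes, ZMarkStripe* mine) {
  ZMarkStripe* stripe = stripes->stripe_next(mine);
  for (size_t i = 0; i < stripes->nstripes() - 1; i++) {
    ZMarkStack* const stack = stripe->steal_stack();
    if (stack != NULL) {
      return stack;
    }
    stripe = stripes->stripe_next(stripe);
  }
  return NULL; // nothing published or overflowed anywhere else
}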