  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    // Keep the cached chunk count in sync with the list contents.
    _chunks_in_chunk_list--;
  }
  return result;
}
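
// Illustration only, not part of the original file: remove_chunk_from_list()
// is defined elsewhere in this file. A plausible minimal sketch of the head
// pop on the intrusive singly linked list might look like this (hypothetical
// body, shown for context):
//
//   G1CMMarkStack::TaskQueueEntryChunk*
//   G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
//     TaskQueueEntryChunk* result = *list;
//     if (result != NULL) {
//       *list = result->next;
//     }
//     return result;
//   }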

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}
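
// For context (a sketch, not verified against the rest of this file): the
// matching push side presumably returns chunks to the free list under the
// same lock, along these lines:
//
//   void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
//     MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
//     add_chunk_to_list(&_free_list, elem);
//   }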

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase _hwm in
  // parallel code. Further, this limits _hwm to a value of
  // _chunk_capacity + #threads, avoiding wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}
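
// Illustration only, not part of the original file: the claim pattern above
// (dirty-read early-out, then atomic fetch-and-add with a post-check), shown
// in isolation with standard C++ atomics. All names here are hypothetical.
//
//   #include <atomic>
//   #include <cstddef>
//
//   struct BumpClaimer {
//     std::atomic<size_t> _hwm{0}; // high-water mark; only ever increased
//     size_t _capacity;
//
//     explicit BumpClaimer(size_t capacity) : _capacity(capacity) {}
//
//     // Returns a unique index in [0, _capacity), or _capacity if exhausted.
//     size_t claim() {
//       // The relaxed read may be stale, but _hwm only grows, so a stale
//       // value can at worst let us fall through to the checked fetch_add.
//       if (_hwm.load(std::memory_order_relaxed) >= _capacity) {
//         return _capacity;
//       }
//       // Each thread performs at most one fetch_add past the early-out, so
//       // _hwm is bounded by _capacity + #threads and cannot wrap around.
//       size_t cur_idx = _hwm.fetch_add(1, std::memory_order_relaxed);
//       return (cur_idx < _capacity) ? cur_idx : _capacity;
//     }
//   };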

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }