  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

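// Prepend a chunk to the given list. List manipulation is serialized by
// ParGCRareEvent_lock, taken without a safepoint check.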
void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  elem->next = *list;
  *list = elem;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);

  OopChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

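// Carve the next unused chunk out of the reserved backing array by atomically
// bumping _hwm. Returns NULL when the backing array is exhausted.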
G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read is okay because we only ever increase the _hwm in parallel code.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
  result->next = NULL;
  return result;
}

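// Push a buffer of OopsPerChunk entries onto the global mark stack. A recycled
// chunk from the free list is preferred; otherwise a new chunk is carved out of
// the backing array, and _out_of_memory is set if neither source can supply one.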
void G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
  // Get a new chunk.
  OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();
  }

  if (new_chunk == NULL) {
    _out_of_memory = true;
    return;
  }

  for (size_t i = 0; i < OopsPerChunk; i++) {
    new_chunk->data[i] = ptr_arr[i];
  }

  add_chunk_to_list(&_chunk_list, new_chunk);
  Atomic::inc(&_chunks_in_chunk_list);
}

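// Pop one chunk from the global mark stack, copying its OopsPerChunk entries
// into ptr_arr and recycling the chunk onto the free list. Returns false if
// the chunk list is empty.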
bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
  OopChunk* cur = remove_chunk_from_list(&_chunk_list);

  if (cur == NULL) {
    return false;
  }

  Atomic::dec(&_chunks_in_chunk_list);

  for (size_t i = 0; i < OopsPerChunk; i++) {
    ptr_arr[i] = (oop)cur->data[i];
  }

  add_chunk_to_list(&_free_list, cur);
  return true;
}

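// Reset the stack to the empty state. No synchronization is performed, so this
// must only be called when no concurrent pushes or pops are possible.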
void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  clear_out_of_memory();
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
           "mark stack should be empty (unless it overflowed)");

    if (_global_mark_stack.is_out_of_memory()) {
      // This should have been done already when we tried to push an
      // entry onto the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

|
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

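// Prepend a chunk to the given list. Callers are expected to hold the lock
// that guards the particular list; see the locked wrappers below.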
void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  elem->next = *list;
  *list = elem;
}

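// The chunk list and the free list are each guarded by their own lock, so that
// pushing full chunks and recycling empty ones do not contend with each other.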
void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
  OopChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  OopChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    // Only account for the removal if a chunk was actually taken.
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

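// Carve the next unused chunk out of the reserved backing array by atomically
// bumping _hwm. Returns NULL when the backing array is exhausted.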
G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase _hwm in parallel code.
  // Further, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
  result->next = NULL;
  return result;
}

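// Push a buffer of OopsPerChunk entries onto the global mark stack. Returns
// false and sets _out_of_memory if no chunk could be obtained from either the
// free list or the backing array.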
bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
  // Get a new chunk.
  OopChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();
  }

  if (new_chunk == NULL) {
    _out_of_memory = true;
    return false;
  }

  Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);

  // Use the locked wrapper so the chunk list and its counter stay consistent.
  add_chunk_to_chunk_list(new_chunk);

  return true;
}

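// Pop one chunk from the global mark stack, copying its OopsPerChunk entries
// into ptr_arr and recycling the chunk onto the free list. Returns false if
// the chunk list is empty.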
bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
  // Use the locked wrapper, which also decrements the chunk counter.
  OopChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);

  add_chunk_to_free_list(cur);
  return true;
}
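
// Illustrative sketch (not from this file) of how a marking task might hand a
// full local buffer to the global stack and later drain one chunk back;
// "local_buf", "out_buf" and "global_stack" are hypothetical names:
//
//   oop local_buf[G1CMMarkStack::OopsPerChunk];
//   // ... fill local_buf with OopsPerChunk entries ...
//   if (!global_stack->par_push_chunk(local_buf)) {
//     // No chunk available: the caller has to treat this as overflow.
//   }
//
//   oop out_buf[G1CMMarkStack::OopsPerChunk];
//   if (global_stack->par_pop_chunk(out_buf)) {
//     // Process the OopsPerChunk entries copied into out_buf.
//   }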

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  clear_out_of_memory();
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
           "Mark stack should be empty (unless it is out of memory)");

    if (_global_mark_stack.is_out_of_memory()) {
      // This should have been done already when we tried to push an
      // entry onto the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

|