
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 11973 : [mq]: 8164948-crash-in-bitmap-scan-during-mark
rev 11974 : imported patch 8159422-high-mark-stack-contention
rev 11975 : imported patch 8159422-mikael-review
rev 11976 : imported patch 8159422-aba-fix-1
rev 11977 : imported patch 8159422-kim-review
rev 11978 : imported patch 8159422-kim-review2

*** 214,240 ****
      MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }

  void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
-   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    elem->next = *list;
    *list = elem;
  }

! G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
!   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    OopChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }

  G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
!   // This dirty read is okay because we only ever increase the _hwm in parallel code.
    if (_hwm >= _chunk_capacity) {
      return NULL;
    }

    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
--- 214,261 ----
      MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
    }
  }

  void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
    elem->next = *list;
    *list = elem;
  }

! void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
!   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
!   add_chunk_to_list(&_chunk_list, elem);
!   _chunks_in_chunk_list++;
! }
+ 
+ void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
+   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+   add_chunk_to_list(&_free_list, elem);
+ }
+ 
+ G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
    OopChunk* result = *list;
    if (result != NULL) {
      *list = (*list)->next;
    }
    return result;
  }

+ G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
+   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+   _chunks_in_chunk_list--;
+   return remove_chunk_from_list(&_chunk_list);
+ }
+ 
+ G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
+   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+   return remove_chunk_from_list(&_free_list);
+ }
+ 
  G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
!   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
!   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
!   // wraparound of _hwm.
    if (_hwm >= _chunk_capacity) {
      return NULL;
    }

    size_t cur_idx = Atomic::add(1, &_hwm) - 1;
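The hunk above replaces the single shared ParGCRareEvent_lock with one lock per list (MarkStackChunkList_lock and MarkStackFreeList_lock) and keeps the raw list operations unlocked, so pushes and pops no longer serialize on one mutex. Below is a minimal standalone sketch of that split-lock pattern, not the HotSpot code itself; Chunk, ChunkStack, std::mutex and std::lock_guard stand in for OopChunk, G1CMMarkStack, the two mark-stack locks and MutexLockerEx.

#include <cstddef>
#include <mutex>

struct Chunk {
  Chunk* next;
};

class ChunkStack {
  Chunk*     _chunk_list = nullptr;        // chunks holding live entries
  Chunk*     _free_list  = nullptr;        // chunks available for reuse
  size_t     _chunks_in_chunk_list = 0;
  std::mutex _chunk_list_lock;             // stand-in for MarkStackChunkList_lock
  std::mutex _free_list_lock;              // stand-in for MarkStackFreeList_lock

  // Unlocked helpers: every caller holds the lock guarding *list.
  static void add_chunk_to_list(Chunk** list, Chunk* elem) {
    elem->next = *list;
    *list = elem;
  }
  static Chunk* remove_chunk_from_list(Chunk** list) {
    Chunk* result = *list;
    if (result != nullptr) {
      *list = result->next;
    }
    return result;
  }

public:
  void add_chunk_to_chunk_list(Chunk* elem) {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    add_chunk_to_list(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }

  void add_chunk_to_free_list(Chunk* elem) {
    std::lock_guard<std::mutex> x(_free_list_lock);
    add_chunk_to_list(&_free_list, elem);
  }

  Chunk* remove_chunk_from_chunk_list() {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    Chunk* result = remove_chunk_from_list(&_chunk_list);
    if (result != nullptr) {
      _chunks_in_chunk_list--;
    }
    return result;
  }

  Chunk* remove_chunk_from_free_list() {
    std::lock_guard<std::mutex> x(_free_list_lock);
    return remove_chunk_from_list(&_free_list);
  }
};

Splitting the lock works because a push never needs both lists in the same critical section: a chunk is taken from the free list under one lock and published to the chunk list under the other.
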
*** 245,274 ****
    OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
    result->next = NULL;
    return result;
  }

! void G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
    // Get a new chunk.
    OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }

    if (new_chunk == NULL) {
      _out_of_memory = true;
!     return;
    }

!   for (size_t i = 0; i < OopsPerChunk; i++) {
!     new_chunk->data[i] = ptr_arr[i];
!   }

    add_chunk_to_list(&_chunk_list, new_chunk);
    Atomic::inc(&_chunks_in_chunk_list);
  }

  bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
    OopChunk* cur = remove_chunk_from_list(&_chunk_list);
--- 266,295 ----
    OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
    result->next = NULL;
    return result;
  }

! bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
    // Get a new chunk.
    OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

    if (new_chunk == NULL) {
      // Did not get a chunk from the free list. Allocate from backing memory.
      new_chunk = allocate_new_chunk();
    }

    if (new_chunk == NULL) {
      _out_of_memory = true;
!     return false;
    }

!   Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);

    add_chunk_to_list(&_chunk_list, new_chunk);
    Atomic::inc(&_chunks_in_chunk_list);
+ 
+   return true;
  }

  bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
    OopChunk* cur = remove_chunk_from_list(&_chunk_list);
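par_push_chunk now reports success to the caller: it first takes a chunk from the free list, then bump-allocates from the pre-reserved backing array via allocate_new_chunk, and only if both fail records _out_of_memory and returns false. The sketch below (again standalone, not HotSpot code) illustrates the bump-allocation half and why the dirty read of the high-water mark is safe: the early return keeps _hwm at roughly capacity plus one increment per racing thread, so the counter cannot wrap; std::atomic stands in for HotSpot's Atomic::add.

#include <atomic>
#include <cstddef>

struct Chunk { Chunk* next; };

class ChunkAllocator {
  Chunk*              _base;       // pre-reserved backing array of chunks
  const size_t        _capacity;   // number of chunks in _base
  std::atomic<size_t> _hwm{0};     // high-water mark into _base

public:
  ChunkAllocator(Chunk* base, size_t capacity)
    : _base(base), _capacity(capacity) {}

  Chunk* allocate_new_chunk() {
    // Dirty read: _hwm only ever grows, and bailing out here bounds it to
    // _capacity plus at most one increment per concurrently racing thread,
    // so the fetch_add below can never wrap around.
    if (_hwm.load(std::memory_order_relaxed) >= _capacity) {
      return nullptr;
    }
    size_t cur_idx = _hwm.fetch_add(1);
    if (cur_idx >= _capacity) {
      return nullptr;              // lost the race for the final chunks
    }
    Chunk* result = &_base[cur_idx];
    result->next = nullptr;
    return result;
  }
};

A caller that gets no chunk from either the free list or this allocator is the only party that needs to report exhaustion, which is exactly what the bool return of par_push_chunk surfaces.
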
*** 276,288 ****
      return false;
    }

    Atomic::dec(&_chunks_in_chunk_list);

!   for (size_t i = 0; i < OopsPerChunk; i++) {
!     ptr_arr[i] = (oop)cur->data[i];
!   }

    add_chunk_to_list(&_free_list, cur);

    return true;
  }
--- 297,307 ----
      return false;
    }

    Atomic::dec(&_chunks_in_chunk_list);

!   Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);

    add_chunk_to_list(&_free_list, cur);

    return true;
  }
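Both hand-written copy loops collapse into single Copy::conjoint_oops_atomic calls, which move a whole chunk payload at once. The standalone sketch below shows the resulting pop protocol under stated assumptions: oop_t, OOPS_PER_CHUNK and the two declared list helpers are illustrative stand-ins for oop, OopsPerChunk and the locked list functions from the first hunk, and std::copy_n stands in for Copy::conjoint_oops_atomic.

#include <algorithm>
#include <cstddef>

typedef void* oop_t;                       // stand-in for HotSpot's oop
const size_t OOPS_PER_CHUNK = 1023;        // illustrative capacity only

struct Chunk {
  Chunk* next;
  oop_t  data[OOPS_PER_CHUNK];
};

Chunk* remove_chunk_from_chunk_list();     // assumed: locked pop, as sketched earlier
void   add_chunk_to_free_list(Chunk* c);   // assumed: locked push, as sketched earlier

// Pop one full chunk's worth of entries into ptr_arr and recycle the chunk.
bool par_pop_chunk_sketch(oop_t* ptr_arr) {
  Chunk* cur = remove_chunk_from_chunk_list();
  if (cur == nullptr) {
    return false;                          // no full chunks available
  }
  // One bulk copy of the whole payload replaces the per-element loop.
  std::copy_n(cur->data, OOPS_PER_CHUNK, ptr_arr);
  add_chunk_to_free_list(cur);             // chunk is free for reuse
  return true;
}
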
*** 1729,1739 ****
    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
!          "mark stack should be empty (unless it overflowed)");

    if (_global_mark_stack.is_out_of_memory()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
--- 1748,1758 ----
    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
!          "Mark stack should be empty (unless it is out of memory)");

    if (_global_mark_stack.is_out_of_memory()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();