--- old/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2016-09-13 11:46:15.729111639 +0200
+++ new/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2016-09-13 11:46:15.581107031 +0200
@@ -216,13 +216,22 @@
 }
 
 void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
-  MutexLockerEx x(GlobalMarkStack_lock, Mutex::_no_safepoint_check_flag);
   elem->next = *list;
   *list = elem;
 }
 
-G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
-  MutexLockerEx x(GlobalMarkStack_lock, Mutex::_no_safepoint_check_flag);
+void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_chunk_list, elem);
+  _chunks_in_chunk_list++;
+}
+
+void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_free_list, elem);
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
   OopChunk* result = *list;
   if (result != NULL) {
     *list = (*list)->next;
@@ -231,8 +240,24 @@
   return result;
 }
 
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  OopChunk* result = remove_chunk_from_list(&_chunk_list);
+  if (result != NULL) {
+    _chunks_in_chunk_list--;
+  }
+  return result;
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  return remove_chunk_from_list(&_free_list);
+}
+
 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
-  // This dirty read is okay because we only ever increase the _hwm in parallel code.
+  // This dirty read of _hwm is okay because we only ever increase _hwm in parallel code.
+  // Furthermore, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
+  // wraparound of _hwm.
   if (_hwm >= _chunk_capacity) {
     return NULL;
   }
@@ -261,7 +286,7 @@
     return false;
   }
 
-  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
+  Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);
 
   add_chunk_to_list(&_chunk_list, new_chunk);
   Atomic::inc(&_chunks_in_chunk_list);
@@ -278,7 +303,7 @@
   Atomic::dec(&_chunks_in_chunk_list);
 
-  Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
+  Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);
 
   add_chunk_to_list(&_free_list, cur);
 
   return true;
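
The heart of this change is splitting the single GlobalMarkStack_lock, which
previously serialized access to both lists, into two dedicated locks:
MarkStackChunkList_lock for the list of chunks holding live mark-stack entries,
and MarkStackFreeList_lock for the list of recycled chunks. A thread returning
an empty chunk to the free list no longer contends with a thread publishing new
work on the chunk list. Below is a minimal standalone sketch of that
lock-splitting pattern, with std::mutex standing in for HotSpot's MutexLockerEx;
every name in it (ChunkStack, Chunk, push, pop) is invented for illustration and
is not code from this patch.

#include <cstddef>
#include <mutex>

struct Chunk {
  Chunk* next;
  void*  data[128];              // stand-in for OopChunk::data
};

class ChunkStack {
  Chunk* _chunk_list;            // chunks containing live work
  Chunk* _free_list;             // empty chunks available for reuse
  size_t _chunks_in_chunk_list;
  std::mutex _chunk_list_lock;   // analogue of MarkStackChunkList_lock
  std::mutex _free_list_lock;    // analogue of MarkStackFreeList_lock

  // Unsynchronized helpers; the caller must hold the matching lock.
  static void push(Chunk** list, Chunk* elem) {
    elem->next = *list;
    *list = elem;
  }

  static Chunk* pop(Chunk** list) {
    Chunk* result = *list;
    if (result != nullptr) {
      *list = result->next;
    }
    return result;
  }

 public:
  ChunkStack()
    : _chunk_list(nullptr), _free_list(nullptr), _chunks_in_chunk_list(0) {}

  void add_chunk_to_chunk_list(Chunk* elem) {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    push(&_chunk_list, elem);
    _chunks_in_chunk_list++;
  }

  Chunk* remove_chunk_from_chunk_list() {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    Chunk* result = pop(&_chunk_list);
    if (result != nullptr) {
      _chunks_in_chunk_list--;   // only count a chunk that was actually removed
    }
    return result;
  }

  void add_chunk_to_free_list(Chunk* elem) {
    std::lock_guard<std::mutex> x(_free_list_lock);
    push(&_free_list, elem);
  }

  Chunk* remove_chunk_from_free_list() {
    std::lock_guard<std::mutex> x(_free_list_lock);
    return pop(&_free_list);
  }
};

Separating the unsynchronized push/pop helpers from the locked wrappers mirrors
the patch's split between add_chunk_to_list/remove_chunk_from_list and the new
*_chunk_list/*_free_list functions.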
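The expanded comment in allocate_new_chunk is worth unpacking: chunks are carved
out of a pre-reserved backing array by bumping a high-water mark, the unlocked
capacity pre-check is a deliberate dirty read, and the bound of _chunk_capacity
+ #threads holds because each thread can push _hwm past the limit at most once
before the pre-check starts turning it away. Here is a sketch of that
allocation scheme under the same caveats as above, with invented names and
std::atomic standing in for HotSpot's Atomic class; it is not this patch's code.

#include <atomic>
#include <cstddef>

struct Chunk {
  Chunk* next;
  void*  data[128];
};

class ChunkAllocator {
  Chunk* _base;                  // pre-reserved array of _chunk_capacity chunks
  size_t _chunk_capacity;
  std::atomic<size_t> _hwm{0};   // high-water mark: next unclaimed chunk index
 public:
  ChunkAllocator(Chunk* base, size_t capacity)
    : _base(base), _chunk_capacity(capacity) {}

  Chunk* allocate_new_chunk() {
    // Dirty read: racing threads may all pass a nearly-full check, but each
    // then increments _hwm at most once before subsequent pre-checks fail,
    // so _hwm never exceeds _chunk_capacity + #threads and cannot wrap around.
    if (_hwm.load(std::memory_order_relaxed) >= _chunk_capacity) {
      return nullptr;
    }
    size_t cur_idx = _hwm.fetch_add(1);   // claim the next index
    if (cur_idx >= _chunk_capacity) {
      return nullptr;                     // lost the race; capacity exhausted
    }
    return &_base[cur_idx];
  }
};

Separately, the last two hunks replace Copy::conjoint_memory_atomic, called with
a byte count of OopsPerChunk * sizeof(oop), with the oop-specialized
Copy::conjoint_oops_atomic, which takes the element count directly and
guarantees that each oop is copied atomically.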