src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 11973 : [mq]: 8164948-crash-in-bitmap-scan-during-mark
rev 11974 : imported patch 8159422-high-mark-stack-contention
rev 11975 : imported patch 8159422-mikael-review
rev 11976 : imported patch 8159422-aba-fix-1
rev 11977 : imported patch 8159422-kim-review
rev 11978 : imported patch 8159422-kim-review2

@@ -214,27 +214,48 @@
     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
   }
 }
 
 void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
-  MutexLockerEx x(GlobalMarkStack_lock, Mutex::_no_safepoint_check_flag);
   elem->next = *list;
   *list = elem;
 }
 
-G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
-  MutexLockerEx x(GlobalMarkStack_lock, Mutex::_no_safepoint_check_flag);
+void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_chunk_list, elem);
+  _chunks_in_chunk_list++;
+}
 
+void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_free_list, elem);
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
   OopChunk* result = *list;
   if (result != NULL) {
     *list = (*list)->next;
   }
   return result;
 }
 
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  _chunks_in_chunk_list--;
+  return remove_chunk_from_list(&_chunk_list);
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  return remove_chunk_from_list(&_free_list);
+}
+
 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
-  // This dirty read is okay because we only ever increase the _hwm in parallel code.
+  // This dirty read of _hwm is okay because we only ever increase _hwm in parallel code.
+  // Further, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
+  // wraparound of _hwm.
   if (_hwm >= _chunk_capacity) {
     return NULL;
   }
 
   size_t cur_idx = Atomic::add(1, &_hwm) - 1;
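
The first hunk replaces the single GlobalMarkStack_lock with two independent locks, MarkStackFreeList_lock and MarkStackChunkList_lock, and keeps chunk allocation lock-free behind an atomically bumped high-water mark. Below is a minimal standalone sketch of that shape, using std::mutex and std::atomic in place of HotSpot's MutexLockerEx and Atomic; the class and helper names are illustrative, not from the patch.

#include <atomic>
#include <cstddef>
#include <mutex>

struct Chunk {                        // stands in for G1CMMarkStack::OopChunk
  Chunk* next;
};

class ChunkLists {                    // illustrative name, not in the patch
  Chunk*              _base;          // pre-reserved array of _capacity chunks
  size_t              _capacity;
  std::atomic<size_t> _hwm{0};        // bump pointer into _base

  std::mutex _free_list_lock;         // plays the role of MarkStackFreeList_lock
  std::mutex _chunk_list_lock;        // plays the role of MarkStackChunkList_lock
  Chunk*     _free_list  = nullptr;
  Chunk*     _chunk_list = nullptr;
  size_t     _chunks_in_chunk_list = 0;

  static void add_to(Chunk** list, Chunk* e) {
    e->next = *list;
    *list = e;
  }
  static Chunk* remove_from(Chunk** list) {
    Chunk* result = *list;
    if (result != nullptr) {
      *list = result->next;
    }
    return result;
  }

public:
  ChunkLists(Chunk* base, size_t capacity) : _base(base), _capacity(capacity) {}

  // Each list has its own lock, so threads recycling chunks onto the free
  // list no longer serialize against threads publishing full chunks.
  void add_chunk_to_chunk_list(Chunk* e) {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    add_to(&_chunk_list, e);
    _chunks_in_chunk_list++;
  }
  void add_chunk_to_free_list(Chunk* e) {
    std::lock_guard<std::mutex> x(_free_list_lock);
    add_to(&_free_list, e);
  }
  Chunk* remove_chunk_from_chunk_list() {
    std::lock_guard<std::mutex> x(_chunk_list_lock);
    Chunk* result = remove_from(&_chunk_list);
    if (result != nullptr) {
      _chunks_in_chunk_list--;
    }
    return result;
  }
  Chunk* remove_chunk_from_free_list() {
    std::lock_guard<std::mutex> x(_free_list_lock);
    return remove_from(&_free_list);
  }

  // Lock-free bump allocation from the reserved backing array.  The initial
  // relaxed load is the "dirty read": a few racers may slip past it, so _hwm
  // can overshoot _capacity by at most the number of threads, after which the
  // early return stops further increments and wraparound cannot occur.
  Chunk* allocate_new_chunk() {
    if (_hwm.load(std::memory_order_relaxed) >= _capacity) {
      return nullptr;
    }
    size_t cur_idx = _hwm.fetch_add(1);
    if (cur_idx >= _capacity) {
      return nullptr;
    }
    return &_base[cur_idx];
  }
};

Splitting the lock means free-list recycling and chunk-list publication proceed independently, which is the contention the 8159422 change targets.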

@@ -259,11 +280,11 @@
   if (new_chunk == NULL) {
     _out_of_memory = true;
     return false;
   }
 
-  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
+  Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);
 
   add_chunk_to_list(&_chunk_list, new_chunk);
   Atomic::inc(&_chunks_in_chunk_list);
 
   return true;
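
The switch from Copy::conjoint_memory_atomic to Copy::conjoint_oops_atomic also changes the third argument from a byte count to an element count, and makes the per-oop granularity of the copy explicit. A rough standalone illustration of an element-wise oop copy follows, assuming oop is a pointer-sized word and ignoring the overlap handling the real conjoint_* routines provide; the typedef and function name are placeholders.

#include <cstddef>

typedef void* oop;                    // placeholder for HotSpot's oop type

// Sketch only: copy 'count' oops one word at a time.  Each element is read and
// written with a single pointer-sized access (volatile keeps the compiler from
// splitting or merging them), so a concurrent reader never observes a torn oop.
static void copy_oops_elementwise(const volatile oop* from, volatile oop* to, size_t count) {
  for (size_t i = 0; i < count; i++) {
    to[i] = from[i];
  }
}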

@@ -276,11 +297,11 @@
     return false;
   }
 
   Atomic::dec(&_chunks_in_chunk_list);
 
-  Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
+  Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);
 
   add_chunk_to_list(&_free_list, cur);
   return true;
 }
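
Taken together, the last two hunks are the two halves of a buffer round trip: a full local buffer is copied into a chunk taken from the free list and published on the chunk list, and later a chunk is taken from the chunk list, drained back into a local buffer, and returned to the free list. The single-threaded sketch below mirrors that round trip; the chunk size, the names, and the plain memcpy are simplifications, and the locking and out-of-memory handling from the real code are omitted.

#include <cassert>
#include <cstddef>
#include <cstring>

static const size_t kOopsPerChunk = 4;        // stand-in for OopsPerChunk

struct PayloadChunk {                         // chunk with payload, like OopChunk
  PayloadChunk* next;
  void*         data[kOopsPerChunk];
};

static void push_to(PayloadChunk** list, PayloadChunk* c) {
  c->next = *list;
  *list = c;
}

static PayloadChunk* pop_from(PayloadChunk** list) {
  PayloadChunk* c = *list;
  if (c != nullptr) {
    *list = c->next;
  }
  return c;
}

int main() {
  PayloadChunk storage[2] = {};
  PayloadChunk* free_list  = nullptr;
  PayloadChunk* chunk_list = nullptr;
  push_to(&free_list, &storage[0]);
  push_to(&free_list, &storage[1]);

  // Push path: drain a full local buffer into a chunk and publish it.
  void* out_buf[kOopsPerChunk] = { (void*)0x10, (void*)0x20, (void*)0x30, (void*)0x40 };
  PayloadChunk* c = pop_from(&free_list);
  memcpy(c->data, out_buf, sizeof(out_buf));  // the real code uses an oop-atomic copy
  push_to(&chunk_list, c);

  // Pop path: take a published chunk, refill a local buffer, recycle the chunk.
  void* in_buf[kOopsPerChunk];
  PayloadChunk* d = pop_from(&chunk_list);
  memcpy(in_buf, d->data, sizeof(in_buf));
  push_to(&free_list, d);

  assert(in_buf[2] == (void*)0x30);
  return 0;
}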
 