
src/share/vm/gc/g1/ptrQueue.cpp

rev 13131 : [mq]: G1LockOrderProblems

Old version:

  58     _buf = NULL;
  59     set_index(0);
  60   }
  61 }
  62 
  63 
  64 void PtrQueue::enqueue_known_active(void* ptr) {
  65   while (_index == 0) {
  66     handle_zero_index();
  67   }
  68 
  69   assert(_buf != NULL, "postcondition");
  70   assert(index() > 0, "postcondition");
  71   assert(index() <= capacity(), "invariant");
  72   _index -= _element_size;
  73   _buf[index()] = ptr;
  74 }
  75 
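The enqueue path above fills the buffer from the high end down: _index holds the remaining free space in bytes, starting at the byte capacity and reaching 0 when the buffer is full, and index() converts that to an element index. A minimal standalone sketch of the same scheme (illustrative names, not the HotSpot code):

#include <cassert>
#include <cstddef>

// Downward-filling pointer buffer: _index counts free bytes remaining.
struct MiniPtrQueue {
  void** _buf;
  size_t _index;   // free space left, in bytes; 0 means the buffer is full

  size_t index() const { return _index / sizeof(void*); }  // element index

  void enqueue_known_active(void* ptr) {
    assert(_index > 0 && "caller must install a fresh buffer first");
    _index -= sizeof(void*);   // claim one slot
    _buf[index()] = ptr;       // slots fill from the top toward slot 0
  }
};

Counting free bytes rather than used elements lets the zero test in the while loop above double as the full-buffer test.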
  76 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  77   assert(_lock->owned_by_self(), "Required.");
  78 
  79   // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  80   // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as the
  81   // two have the same rank and we would get the "possible deadlock" message.
  82   _lock->unlock();
  83 
  84   qset()->enqueue_complete_buffer(node);
  85   // We must relock only because the caller will unlock again, as in the
  86   // normal case.
  87   _lock->lock_without_safepoint_check();
  88 }
  89 
  90 
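The rank mentioned in the comment above is HotSpot's lock-ordering discipline: a thread should acquire locks in decreasing rank order, and acquiring a second lock of equal (or higher) rank fires the "possible deadlock" assertion. A toy illustration of that check, not the real Mutex implementation:

#include <cassert>
#include <vector>

struct RankedLock {
  int _rank;
  explicit RankedLock(int rank) : _rank(rank) {}
};

// Ranks of the locks this thread currently holds, in acquisition order.
static thread_local std::vector<int> g_held_ranks;

void rank_checked_lock(RankedLock* l) {
  // A lock whose rank is not strictly below every rank already held
  // could invert the global lock order, so flag a possible deadlock.
  assert((g_held_ranks.empty() || l->_rank < g_held_ranks.back()) &&
         "possible deadlock");
  g_held_ranks.push_back(l->_rank);
}

void rank_checked_unlock(RankedLock* l) {
  assert(!g_held_ranks.empty() && g_held_ranks.back() == l->_rank);
  g_held_ranks.pop_back();
}

The strict-decrease rule here is a simplification; the real checker has additional special cases.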
  91 BufferNode* BufferNode::allocate(size_t size) {
  92   size_t byte_size = size * sizeof(void*);
  93   void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  94   return new (data) BufferNode;
  95 }
  96 
  97 void BufferNode::deallocate(BufferNode* node) {
  98   node->~BufferNode();
  99   FREE_C_HEAP_ARRAY(char, node);
 100 }
 101 
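BufferNode::allocate and deallocate above put the node header and its buffer in one C-heap block: the block spans buffer_offset() + byte_size bytes, the header is constructed at the front with placement new, and deallocation runs the destructor before freeing the block. A self-contained sketch of that layout, with malloc/free standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY:

#include <cstdlib>
#include <new>

struct Node {
  Node* _next = nullptr;

  // The buffer lives immediately after the header in the same block.
  static size_t buffer_offset() { return sizeof(Node); }
  void** buffer() {
    return reinterpret_cast<void**>(
        reinterpret_cast<char*>(this) + buffer_offset());
  }

  static Node* allocate(size_t size) {
    void* block = ::malloc(buffer_offset() + size * sizeof(void*));
    if (block == nullptr) return nullptr;
    return new (block) Node;   // placement-new the header at the front
  }

  static void deallocate(Node* node) {
    node->~Node();             // mirror of allocate
    ::free(node);
  }
};

A single allocation per node keeps header and payload adjacent and halves the allocator traffic compared with separate allocations.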
 102 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
 103   _buffer_size(0),
 104   _max_completed_queue(0),
 105   _cbl_mon(NULL), _fl_lock(NULL),
 106   _notify_when_complete(notify_when_complete),
 107   _completed_buffers_head(NULL),


 172     _buf_free_list = node->next();
 173     _buf_free_list_sz--;
 174     BufferNode::deallocate(node);
 175   }
 176 }
 177 
 178 void PtrQueue::handle_zero_index() {
 179   assert(index() == 0, "precondition");
 180 
 181   // This thread records the full buffer and allocates a new one (while
 182   // holding the lock if there is one).
 183   if (_buf != NULL) {
 184     if (!should_enqueue_buffer()) {
 185       assert(index() > 0, "the buffer can only be re-used if it's not full");
 186       return;
 187     }
 188 
 189     if (_lock) {
 190       assert(_lock->owned_by_self(), "Required.");
 191 
  192       // The current PtrQueue may be the shared dirty card queue and
  193       // may be manipulated by more than one worker thread
  194       // during a pause. Since the enqueueing of the completed
  195       // buffer unlocks the Shared_DirtyCardQ_lock, more than one
  196       // worker thread can race on reading the shared queue attributes
  197       // (_buf and _index), and multiple threads can call into this
  198       // routine for the same buffer. That would cause the completed
  199       // buffer to be added to the CBL multiple times.
 200 
  201       // We "claim" the current buffer by caching the value of _buf in
  202       // a local and clearing the field while holding _lock. When
  203       // _lock is released (while enqueueing the completed buffer),
  204       // the thread that acquires _lock will skip this code,
  205       // preventing a subsequent multiple enqueue, and will
  206       // install a newly allocated buffer below.
 207 
 208       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 209       _buf = NULL;         // clear shared _buf field
 210 
 211       locking_enqueue_completed_buffer(node); // enqueue completed buffer
 212 
  213       // While the current thread was enqueueing the buffer, another thread
  214       // may have allocated a new buffer and inserted it into this pointer
  215       // queue. If that happens then we just return, so that the current
  216       // thread doesn't overwrite the buffer allocated by the other thread
  217       // and potentially lose some dirtied cards.
 218 
 219       if (_buf != NULL) return;
 220     } else {
 221       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 222       if (qset()->process_or_enqueue_complete_buffer(node)) {
 223         // Recycle the buffer. No allocation.
 224         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
 225         assert(capacity() == qset()->buffer_size(), "invariant");
 226         reset();
 227         return;
 228       }
 229     }
 230   }
 231   // Set capacity in case this is the first allocation.
 232   set_capacity(qset()->buffer_size());
 233   // Allocate a new buffer.
 234   _buf = qset()->allocate_buffer();
 235   reset();
 236 }
 237 
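The claim protocol the comments describe boils down to: cache _buf in a local, clear the shared field while holding the lock, drop the lock to enqueue, and after reacquiring the lock back off if another thread has already installed a fresh buffer. A minimal sketch of that pattern using std::mutex (hypothetical names; the real code uses HotSpot monitors):

#include <mutex>

struct SharedQueue {
  std::mutex _lock;        // stands in for Shared_DirtyCardQ_lock
  void** _buf = nullptr;   // shared; NULL means "claimed / in flight"

  // Called with `held` locked, as handle_zero_index is called above.
  void flush_full_buffer(std::unique_lock<std::mutex>& held) {
    void** claimed = _buf;
    if (claimed == nullptr) return;  // another thread already claimed it
    _buf = nullptr;                  // claim the buffer under the lock

    held.unlock();                   // enqueueing drops the lock...
    // ... hand `claimed` to the completed-buffer list here ...
    held.lock();                     // ... and reacquires it

    if (_buf != nullptr) return;     // someone else installed a new buffer
    _buf = new void*[256];           // install a fresh one (size illustrative)
  }
};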
 238 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
 239   if (Thread::current()->is_Java_thread()) {

New version (after the patch):

  58     _buf = NULL;
  59     set_index(0);
  60   }
  61 }
  62 
  63 
  64 void PtrQueue::enqueue_known_active(void* ptr) {
  65   while (_index == 0) {
  66     handle_zero_index();
  67   }
  68 
  69   assert(_buf != NULL, "postcondition");
  70   assert(index() > 0, "postcondition");
  71   assert(index() <= capacity(), "invariant");
  72   _index -= _element_size;
  73   _buf[index()] = ptr;
  74 }
  75 
  76 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  77   assert(_lock->owned_by_self(), "Required.");
  78   qset()->enqueue_complete_buffer(node);
  79 }
  80 
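Note the difference from the old version above: the unlock of _lock around qset()->enqueue_complete_buffer() and the relock afterwards are gone. With the lock-order conflict between _lock and DirtyCardQ_CBL_mon addressed in this revision, _lock can now remain held across the enqueue.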
  81 
  82 BufferNode* BufferNode::allocate(size_t size) {
  83   size_t byte_size = size * sizeof(void*);
  84   void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  85   return new (data) BufferNode;
  86 }
  87 
  88 void BufferNode::deallocate(BufferNode* node) {
  89   node->~BufferNode();
  90   FREE_C_HEAP_ARRAY(char, node);
  91 }
  92 
  93 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  94   _buffer_size(0),
  95   _max_completed_queue(0),
  96   _cbl_mon(NULL), _fl_lock(NULL),
  97   _notify_when_complete(notify_when_complete),
  98   _completed_buffers_head(NULL),


 163     _buf_free_list = node->next();
 164     _buf_free_list_sz--;
 165     BufferNode::deallocate(node);
 166   }
 167 }
 168 
 169 void PtrQueue::handle_zero_index() {
 170   assert(index() == 0, "precondition");
 171 
 172   // This thread records the full buffer and allocates a new one (while
 173   // holding the lock if there is one).
 174   if (_buf != NULL) {
 175     if (!should_enqueue_buffer()) {
 176       assert(index() > 0, "the buffer can only be re-used if it's not full");
 177       return;
 178     }
 179 
 180     if (_lock) {
 181       assert(_lock->owned_by_self(), "Required.");
 182 
 183       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 184       _buf = NULL;         // clear shared _buf field
 185 
 186       locking_enqueue_completed_buffer(node); // enqueue completed buffer
 187       assert(_buf == NULL, "multiple enqueuers appear to be racing");
 188     } else {
 189       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 190       if (qset()->process_or_enqueue_complete_buffer(node)) {
 191         // Recycle the buffer. No allocation.
 192         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
 193         assert(capacity() == qset()->buffer_size(), "invariant");
 194         reset();
 195         return;
 196       }
 197     }
 198   }
 199   // Set capacity in case this is the first allocation.
 200   set_capacity(qset()->buffer_size());
 201   // Allocate a new buffer.
 202   _buf = qset()->allocate_buffer();
 203   reset();
 204 }
 205 
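Because _lock is now held for the entire claim-and-enqueue sequence, no other thread can observe the cleared _buf or install a replacement, which is exactly what the new assert above checks. A sketch of the patched flow, under the same illustrative assumptions as the earlier SharedQueue sketch:

#include <cassert>
#include <mutex>

struct SharedQueuePatched {
  std::mutex _lock;
  void** _buf = nullptr;

  void flush_full_buffer() {
    std::lock_guard<std::mutex> held(_lock);  // held across the whole flush
    void** claimed = _buf;
    _buf = nullptr;
    // ... enqueue `claimed` on the completed-buffer list, lock still held ...
    assert(_buf == nullptr && "multiple enqueuers appear to be racing");
    _buf = new void*[256];   // install a fresh buffer (size illustrative)
  }
};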
 206 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
 207   if (Thread::current()->is_Java_thread()) {
