
src/share/vm/gc/g1/ptrQueue.cpp

 221       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 222       if (qset()->process_or_enqueue_complete_buffer(node)) {
 223         // Recycle the buffer. No allocation.
 224         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
 225         assert(capacity() == qset()->buffer_size(), "invariant");
 226         reset();
 227         return;
 228       }
 229     }
 230   }
 231   // Set capacity in case this is the first allocation.
 232   set_capacity(qset()->buffer_size());
 233   // Allocate a new buffer.
 234   _buf = qset()->allocate_buffer();
 235   reset();
 236 }
 237 
 238 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
 239   if (Thread::current()->is_Java_thread()) {
 240     // We don't lock. It is fine to be epsilon-precise here.
 241     if (_max_completed_queue == 0 || _max_completed_queue > 0 &&
 242         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) {
 243       bool b = mut_process_buffer(node);
 244       if (b) {
 245         // True here means that the buffer hasn't been deallocated and the caller may reuse it.
 246         return true;
 247       }
 248     }
 249   }
 250   // The buffer will be enqueued. The caller will have to get a new one.
 251   enqueue_complete_buffer(node);
 252   return false;
 253 }
 254 
 255 void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
 256   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 257   cbn->set_next(NULL);
 258   if (_completed_buffers_tail == NULL) {
 259     assert(_completed_buffers_head == NULL, "Well-formedness");
 260     _completed_buffers_head = cbn;
 261     _completed_buffers_tail = cbn;
 262   } else {

--- second copy of the hunk: the same lines, with the condition at 241-243 parenthesized ---

 221       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
 222       if (qset()->process_or_enqueue_complete_buffer(node)) {
 223         // Recycle the buffer. No allocation.
 224         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
 225         assert(capacity() == qset()->buffer_size(), "invariant");
 226         reset();
 227         return;
 228       }
 229     }
 230   }
 231   // Set capacity in case this is the first allocation.
 232   set_capacity(qset()->buffer_size());
 233   // Allocate a new buffer.
 234   _buf = qset()->allocate_buffer();
 235   reset();
 236 }
 237 
 238 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
 239   if (Thread::current()->is_Java_thread()) {
 240     // We don't lock. It is fine to be epsilon-precise here.
 241     if (_max_completed_queue == 0 ||
 242         (_max_completed_queue > 0 &&
 243           _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
 244       bool b = mut_process_buffer(node);
 245       if (b) {
 246         // True here means that the buffer hasn't been deallocated and the caller may reuse it.
 247         return true;
 248       }
 249     }
 250   }
 251   // The buffer will be enqueued. The caller will have to get a new one.
 252   enqueue_complete_buffer(node);
 253   return false;
 254 }
 255 
 256 void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
 257   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 258   cbn->set_next(NULL);
 259   if (_completed_buffers_tail == NULL) {
 260     assert(_completed_buffers_head == NULL, "Well-formedness");
 261     _completed_buffers_head = cbn;
 262     _completed_buffers_tail = cbn;
 263   } else {
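
Only the condition at lines 241-243 differs between the two copies of this hunk: the second version wraps the && clause in parentheses. Since && binds more tightly than || in C++, both spellings group the same way, so the value of the test is unchanged; the parentheses only make the intended grouping explicit (and avoid gcc's "suggest parentheses around '&&' within '||'" warning, which is likely the motivation). The test itself decides when a Java thread processes its own buffer via mut_process_buffer() instead of enqueueing it: when _max_completed_queue is zero, or when it is positive and _n_completed_buffers has reached _max_completed_queue + _completed_queue_padding. A small standalone check of the equivalence (illustrative only, with invented helper names, not part of this change):

  // Both spellings of the threshold test evaluate identically because
  // && binds more tightly than ||.
  #include <cassert>

  static bool old_form(int max, int n, int pad) {
    return max == 0 || max > 0 && n >= max + pad;    // gcc -Wparentheses flags this line
  }

  static bool new_form(int max, int n, int pad) {
    return max == 0 || (max > 0 && n >= max + pad);  // explicit grouping
  }

  int main() {
    for (int max = -1; max <= 2; ++max) {
      for (int n = 0; n <= 4; ++n) {
        for (int pad = 0; pad <= 2; ++pad) {
          assert(old_form(max, n, pad) == new_form(max, n, pad));
        }
      }
    }
    return 0;
  }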


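The second function, enqueue_complete_buffer(), is cut off at the else branch in this excerpt. What is visible is a tail append to the singly linked list of completed buffers, performed while holding _cbl_mon with safepoint checks suppressed: an empty list gets the node as both head and tail, and the else branch (not shown) presumably links the node after the current tail, as in the sketch below. A minimal sketch of that append pattern in plain C++, with std::mutex standing in for the HotSpot monitor and Node/CompletedList as invented names:

  #include <cstdio>
  #include <mutex>

  struct Node {
    Node* _next;
    void set_next(Node* n) { _next = n; }
  };

  class CompletedList {
    std::mutex _lock;
    Node*      _head;
    Node*      _tail;
    int        _count;

   public:
    CompletedList() : _head(NULL), _tail(NULL), _count(0) {}

    void enqueue(Node* n) {
      std::lock_guard<std::mutex> guard(_lock);
      n->set_next(NULL);
      if (_tail == NULL) {          // empty list: node becomes head and tail
        _head = n;
        _tail = n;
      } else {                      // non-empty: link after the current tail
        _tail->set_next(n);
        _tail = n;
      }
      ++_count;                     // track how many buffers are queued
    }

    int count() {
      std::lock_guard<std::mutex> guard(_lock);
      return _count;
    }
  };

  int main() {
    CompletedList list;
    Node a = { NULL };
    Node b = { NULL };
    list.enqueue(&a);
    list.enqueue(&b);
    std::printf("completed buffers: %d\n", list.count());  // prints 2
    return 0;
  }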