/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_ptrQueue.cpp.incl"

// Forward declaration of the file-scope index helper defined further down;
// flush() uses byte_index_to_index before that definition.
static int byte_index_to_index(int ind);

PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
  _qset(qset_), _buf(NULL), _index(0), _active(active),
  _perm(perm), _lock(NULL)
{}

void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}
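
// Worked example of the loop above, assuming oopSize == 8 and a four-slot
// buffer (_sz == 32): the buffer fills from the top down, so if _index == 16
// the slots below the fill point (byte offsets 0 and 8, element indices 0
// and 1) are still unused and are NULLed before the buffer is enqueued.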

static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}
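
// Illustration of the byte-index convention used throughout this file
// (assuming oopSize == 8; other platforms change the slot width and hence
// the byte offsets):
//
//   byte_index_to_index(24) == 3   // byte offset 24 is element slot 3
//   index_to_byte_index(3)  == 24
//
// _index counts down in byte units from _sz (buffer empty) toward 0
// (buffer full); at 0, enqueue_known_active must call handle_zero_index.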

void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as the two
  // have the same rank and we would otherwise get the "possible deadlock"
  // message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, as in the
  // normal case.
  _lock->lock_without_safepoint_check();
}


PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
    char* b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
    return BufferNode::make_buffer_from_block(b);
  }
}
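
// Sketch of the block layout allocated above (the header size is whatever
// BufferNode::aligned_size() evaluates to on the current platform):
//
//   +---------------------------+---------------------------+
//   | BufferNode (aligned)      | buffer payload: _sz bytes |
//   +---------------------------+---------------------------+
//   ^ make_block_from_node      ^ make_buffer_from_block/_node
//
// Nodes on the free list and user-visible buffers are thus interconvertible
// without further allocation, which deallocate_buffer and reduce_free_list
// rely on.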

void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode* node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b);
    _buf_free_list_sz--;
    n--;
  }
}

void PtrQueue::handle_zero_index() {
  assert(0 == _index, "Precondition.");
  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQueue may be the shared dirty card queue and
      // may be being manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index) and multiple threads can call into this
      // routine for the same buffer. This would cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing a subsequent multiple enqueue, and
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueuing the buffer another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happened, we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
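
// Condensed timeline of the race the claiming code above prevents, for two
// worker threads T1 and T2 sharing this queue via _lock:
//
//   T1: sees _index == 0, claims _buf (caches it locally, stores NULL),
//       then drops _lock inside locking_enqueue_completed_buffer
//   T2: acquires _lock, sees _index == 0 but _buf == NULL, so it skips the
//       enqueue and installs a freshly allocated buffer
//   T1: reacquires _lock, sees _buf != NULL (T2's buffer), and returns
//       without overwriting it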

bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock; a racy read of these fields is fine, since a small
    // imprecision is acceptable here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}
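
// Example of the unlocked check above, assuming _max_completed_queue == 8
// and _completed_queue_padding == 2 (both configured elsewhere): a Java
// thread attempts mut_process_buffer once _n_completed_buffers reaches 10;
// below that threshold, or if processing fails, the buffer goes onto the
// completed-buffer list instead.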

void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}

int PtrQueueSet::completed_buffers_list_length() {
  int n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
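
// Example: set_buffer_size(256) on a platform where oopSize == 8 stores
// _sz == 2048 bytes, i.e. each buffer holds 256 pointer slots and _index
// walks down from 2048 in steps of 8.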

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet* src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}