/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
  _permanent(permanent), _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (!_permanent && _buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    _index = 0;
  }
}


void PtrQueue::enqueue_known_active(void* ptr) {
  assert(_index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= sizeof(void*);
  _buf[byte_index_to_index(_index)] = ptr;
  assert(_index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(node);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}
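// Worked example of the byte-index scheme used by flush_impl() and
// enqueue_known_active() above (the 256-entry capacity is illustrative,
// not a value fixed by this file): _index counts down in bytes from _sz
// (empty) to 0 (full), so the buffer fills from the highest slot toward
// slot 0. With 256 entries on a 64-bit VM, _sz == 256 * sizeof(void*)
// == 2048; the first enqueue decrements _index to 2040 and stores at
// element index byte_index_to_index(2040) == 255. flush_impl() can
// therefore treat _index == _sz as "empty" and _index == 0 as "full",
// which is what makes handle_zero_index() the refill point.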

BufferNode* BufferNode::allocate(size_t byte_size) {
  assert(byte_size > 0, "precondition");
  assert(is_size_aligned(byte_size, sizeof(void**)),
         "Invalid buffer size " SIZE_FORMAT, byte_size);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

void PtrQueueSet::initialize(Monitor* cbl_mon,
                             Mutex* fl_lock,
                             int process_completed_threshold,
                             int max_completed_queue,
                             PtrQueueSet *fl_owner) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _fl_lock = fl_lock;
  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(_sz);
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}
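// Note on free-list sharing (a summary of the code above, not new
// behavior): several queue sets may be initialized with the same
// fl_owner, in which case allocate_buffer() and deallocate_buffer()
// all push to and pop from the owner's _buf_free_list under the
// owner's _fl_lock. reduce_free_list() asserts _fl_owner == this so
// that only the owning set ever trims the shared list.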
void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQueue may be the shared dirty card queue and
      // may be manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index), and multiple threads can call into this
      // routine for the same buffer. This will cause the completed
      // buffer to be added to the completed buffer list (CBL) multiple
      // times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer),
      // the thread that acquires _lock will skip this code,
      // preventing a subsequent multiple enqueue, and will
      // install a newly allocated buffer below.

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(node); // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(_sz == qset()->buffer_size(), "invariant");
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
}
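// Example interleaving handled by the _lock branch of handle_zero_index()
// (T1 and T2 are hypothetical worker threads sharing the queue): T1 sees
// _index == 0, claims the full buffer by clearing _buf, and drops _lock
// inside locking_enqueue_completed_buffer(). T2 then acquires _lock,
// finds _buf == NULL, skips the enqueue, and installs a fresh buffer.
// When T1 relocks and observes _buf != NULL, it returns immediately
// instead of overwriting T2's buffer (and losing T2's dirtied cards).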
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * sizeof(void*);
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}
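// Note on the notification protocol (a reading of the code above; the
// consumer side lives in derived queue sets, not in this file): both
// enqueue_complete_buffer() and notify_if_necessary() set
// _process_completed and notify _cbl_mon once _n_completed_buffers
// reaches _process_completed_threshold (notify_if_necessary() also
// fires when _max_completed_queue == 0, i.e. no bound is configured),
// at which point a thread waiting on _cbl_mon can drain the
// completed-buffer list.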