/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

// A PtrQueue starts out with no buffer; one is obtained lazily from the
// owning qset on the first enqueue (see handle_zero_index).
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  // Non-permanent queues must have given up their buffer (via flush_impl)
  // before destruction, or the buffer would leak.
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

// Hand the current buffer over to the qset: empty buffers go back to the
// allocator, partially filled ones onto the completed-buffer list.  The
// queue is left without a buffer (_buf == NULL, index 0).
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}


// Add ptr to the queue.  The buffer is filled from the high end toward
// index 0: _index is a byte offset that counts down by _element_size, and
// _index == 0 means the buffer is full (or not yet allocated).
void PtrQueue::enqueue_known_active(void* ptr) {
  // Loop: handle_zero_index may return without making room when the
  // buffer is re-used (see should_enqueue_buffer in handle_zero_index).
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

// Enqueue a completed buffer while the caller already holds this queue's
// _lock (see the _lock branch in handle_zero_index).
void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");
  qset()->enqueue_complete_buffer(node);
}


// Allocate a node whose trailing storage holds a buffer of 'size' pointer
// slots; the BufferNode header is placement-constructed at the front of
// the C-heap block.
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

// Free-list allocator for BufferNodes.  The free list is protected by
// 'lock'; nodes are only created/destroyed outside the critical section.
BufferNode::Allocator::Allocator(size_t buffer_size, Mutex* lock) :
  _buffer_size(buffer_size),
  _lock(lock),
  _free_list(NULL),
  _free_count(0)
{
  assert(lock != NULL, "precondition");
}

BufferNode::Allocator::~Allocator() {
  while (_free_list != NULL) {
    BufferNode* node = _free_list;
    _free_list = node->next();
    BufferNode::deallocate(node);
  }
}

size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

// Take a node from the free list if one is available; otherwise allocate
// a fresh one.  The heap allocation is deliberately performed after the
// lock has been released.
BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    node = _free_list;
    if (node != NULL) {
      _free_list = node->next();
      --_free_count;
      node->set_next(NULL);
      node->set_index(0);
      return node;
    }
  }
  return BufferNode::allocate(_buffer_size);
}

// Return a node to the free list.  Note: the free list grows without
// bound until reduce_free_list() trims it.
void BufferNode::Allocator::release(BufferNode* node) {
  MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_free_list);
  _free_list = node;
  ++_free_count;
}

// Trim the free list: unlink the first half under the lock, then free
// the detached chain outside the critical section.
void BufferNode::Allocator::reduce_free_list() {
  BufferNode* head = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    // For now, delete half.
    size_t remove = _free_count / 2;
    if (remove > 0) {
      head = _free_list;
      BufferNode* tail = head;
      BufferNode* prev = NULL;
      // Walk 'remove' nodes; afterwards prev is the last node to delete
      // and tail is the new head of the retained list.
      for (size_t i = 0; i < remove; ++i, prev = tail, tail = tail->next()) {
        assert(tail != NULL, "free list size is wrong");
      }
      assert(prev != NULL, "invariant");
      assert(prev->next() == tail, "invariant");
      prev->set_next(NULL);
      _free_list = tail;
      _free_count -= remove;
    }
  }
  // Deallocate the detached nodes without holding the lock.
  while (head != NULL) {
    BufferNode* next = head->next();
    BufferNode::deallocate(head);
    head = next;
  }
}

// A PtrQueueSet is constructed empty; the monitor and allocator are
// supplied later via initialize().
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _allocator(NULL),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0),
  _process_completed(false),
  _all_active(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_queue(0),
  _completed_queue_padding(0)
{}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

void PtrQueueSet::initialize(Monitor* cbl_mon,
                             BufferNode::Allocator* allocator,
                             int process_completed_threshold,
                             int max_completed_queue) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && allocator != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _allocator = allocator;
}

// Obtain a raw buffer (the node's trailing storage) from the allocator.
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}

void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  _allocator->release(node);
}

// Called by enqueue_known_active when the current buffer is full (or no
// buffer has been allocated yet).  Either hands the full buffer to the
// qset (possibly processing it in place), recycles it, or allocates a
// new one.
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      // Subclass hook declined the enqueue and left room in the buffer.
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(node);  // enqueue completed buffer
      // If another thread had refilled _buf while we enqueued, two
      // enqueuers would be racing on this queue.
      assert(_buf == NULL, "multiple enqueuers appear to be racing");
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

// Returns true if the buffer was processed in place and may be reused by
// the caller; false if it was enqueued (caller must get a new buffer).
// Only Java threads may process in place, and only when the completed
// queue has reached its limit (back-pressure).
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

// Append cbn to the completed-buffer list (under _cbl_mon) and notify a
// processing thread once the threshold is reached.
void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  // A negative threshold disables notification.
  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

// Count the completed-buffer list by walking it.  Caller must hold
// _cbl_mon (see assert_completed_buffer_list_len_correct).
size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

// Verify the cached count matches the actual list length (lock held).
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  // Either both head and tail are NULL (empty list) or neither is.
  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
         "Sanity");
}

// Wake a processing thread if the completed queue warrants it.
// NOTE(review): the assert message mentions _process_completed but the
// condition checks _process_completed_threshold — matches upstream text.
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert(_process_completed_threshold >= 0, "_process_completed is negative");
  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}