/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

// Construct a queue with no buffer attached.  _index and
// _capacity_in_bytes start at zero; a buffer is allocated lazily by
// handle_zero_index() on the first enqueue.
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  // Non-permanent queues must have handed their buffer back (via
  // flush_impl) before being destroyed.
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

// Detach the current buffer from the queue, handing it to the qset:
// an empty buffer goes back to the allocator, a non-empty one onto the
// completed-buffer list.  Leaves the queue with no buffer attached.
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_completed_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}


// Add ptr to the queue.  _index counts down (in bytes) from capacity
// toward zero, so the buffer fills from the highest subscript toward
// subscript 0; _index == 0 means "full (or no buffer yet)".
void PtrQueue::enqueue_known_active(void* ptr) {
  // Loop (rather than a single call) because handle_zero_index() may
  // hand off a full buffer and still leave us needing another attempt.
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

// Allocate a node with room for a buffer of `size` pointer slots.
// The node header and the buffer share one C-heap allocation: the
// buffer storage starts at buffer_offset() past the node, so a node
// can be recovered from its buffer pointer and vice versa.
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  // Explicit destructor call pairs with the placement-new in allocate().
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

// An Allocator maintains a lock-protected free list of BufferNodes,
// all with the same buffer size, to avoid a C-heap round trip per
// buffer.
BufferNode::Allocator::Allocator(size_t buffer_size, Mutex* lock) :
  _buffer_size(buffer_size),
  _lock(lock),
  _free_list(NULL),
  _free_count(0)
{
  assert(lock != NULL, "precondition");
}

BufferNode::Allocator::~Allocator() {
  // No locking: destruction must not race with allocate/release.
  while (_free_list != NULL) {
    BufferNode* node = _free_list;
    _free_list = node->next();
    BufferNode::deallocate(node);
  }
}

size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

// Pop a node from the free list if one is available; otherwise fall
// back to a fresh C-heap allocation (performed outside the lock).
BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    node = _free_list;
    if (node != NULL) {
      _free_list = node->next();
      --_free_count;
      // Present a clean node to the caller: detached and empty.
      node->set_next(NULL);
      node->set_index(0);
      return node;
    }
  }
  return BufferNode::allocate(_buffer_size);
}

// Return a node to the free list for reuse.
void BufferNode::Allocator::release(BufferNode* node) {
  MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_free_list);
  _free_list = node;
  ++_free_count;
}

// Trim the free list to bound retained memory.  The nodes are
// unlinked while holding the lock, but the (possibly slow) C-heap
// deallocation happens after the lock is released.
void BufferNode::Allocator::reduce_free_list() {
  BufferNode* head = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    // For now, delete half.
    size_t remove = _free_count / 2;
    if (remove > 0) {
      // Walk `remove` nodes in; keep [head, prev] for deletion and
      // make `tail` the new list head.
      head = _free_list;
      BufferNode* tail = head;
      BufferNode* prev = NULL;
      for (size_t i = 0; i < remove; ++i) {
        assert(tail != NULL, "free list size is wrong");
        prev = tail;
        tail = tail->next();
      }
      assert(prev != NULL, "invariant");
      assert(prev->next() == tail, "invariant");
      prev->set_next(NULL);
      _free_list = tail;
      _free_count -= remove;
    }
  }
  // Free the detached sublist outside the lock.
  while (head != NULL) {
    BufferNode* next = head->next();
    BufferNode::deallocate(head);
    head = next;
  }
}

// A PtrQueueSet starts unconfigured; initialize() must supply the
// monitor and allocator before use.
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _allocator(NULL),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),
  _completed_buffers_padding(0),
  _all_active(false)
{}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

// Late initialization: supply the completed-buffer-list monitor and
// the node allocator.  Both are owned by the caller.
void PtrQueueSet::initialize(Monitor* cbl_mon,
                             BufferNode::Allocator* allocator) {
  assert(cbl_mon != NULL && allocator != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _allocator = allocator;
}

// Obtain a buffer (as a raw void** for queue use) from the allocator.
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}

void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  _allocator->release(node);
}

// Called when an enqueue finds _index == 0, i.e. either there is no
// buffer yet or the current one is full.  Either hands off / recycles
// the full buffer, or installs a fresh one.
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      // Subclass filtered the buffer down to non-full; keep using it.
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      // Shared queue case: the caller must already hold _lock.
      assert(_lock->owned_by_self(), "Required.");

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL; // clear shared _buf field

      qset()->enqueue_completed_buffer(node);
      assert(_buf == NULL, "multiple enqueuers appear to be racing");
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_completed_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

// Either process `node` immediately (returning true so the caller can
// recycle its buffer) or enqueue it onto the completed-buffer list
// (returning false so the caller allocates a new buffer).
bool PtrQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself. We don't lock to access
    // buffer count or padding; it is fine to be imprecise here. The
    // add of padding could overflow, which is treated as unlimited.
    size_t limit = _max_completed_buffers + _completed_buffers_padding;
    if ((_n_completed_buffers > limit) && (limit >= _max_completed_buffers)) {
      if (mut_process_buffer(node)) {
        // Successfully processed; return true to allow buffer reuse.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_completed_buffer(node);
  return false;
}

// Append a buffer to the completed-buffer list (under _cbl_mon) and,
// once the threshold is crossed, flag the list for processing and
// optionally notify a waiting processing thread.
void PtrQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed_buffers &&
      (_n_completed_buffers > _process_completed_buffers_threshold)) {
    _process_completed_buffers = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  assert_completed_buffers_list_len_correct_locked();
}

// Pop the head of the completed-buffer list, or return NULL if no
// more than `stop_at` buffers remain (leaving that many for others).
BufferNode* PtrQueueSet::get_completed_buffer(size_t stop_at) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    return NULL;
  }

  assert(_n_completed_buffers > 0, "invariant");
  assert(_completed_buffers_head != NULL, "invariant");
  assert(_completed_buffers_tail != NULL, "invariant");

  BufferNode* bn = _completed_buffers_head;
  _n_completed_buffers--;
  _completed_buffers_head = bn->next();
  if (_completed_buffers_head == NULL) {
    assert(_n_completed_buffers == 0, "invariant");
    _completed_buffers_tail = NULL;
    // List drained; clear the processing flag set by the enqueuers.
    _process_completed_buffers = false;
  }
  assert_completed_buffers_list_len_correct_locked();
  bn->set_next(NULL);
  return bn;
}

// Discard all completed buffers.  The list is detached under the lock
// and the nodes are returned to the allocator outside it.
void PtrQueueSet::abandon_completed_buffers() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    buffers_to_delete = _completed_buffers_head;
    _completed_buffers_head = NULL;
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    _process_completed_buffers = false;
  }
  while (buffers_to_delete != NULL) {
    BufferNode* bn = buffers_to_delete;
    buffers_to_delete = bn->next();
    bn->set_next(NULL);
    deallocate_buffer(bn);
  }
}

#ifdef ASSERT

// Debug-only check that _n_completed_buffers matches the actual list
// length.  Caller must hold _cbl_mon.
void PtrQueueSet::assert_completed_buffers_list_len_correct_locked() {
  assert_lock_strong(_cbl_mon);
  size_t n = 0;
  for (BufferNode* bn = _completed_buffers_head; bn != NULL; bn = bn->next()) {
    ++n;
  }
  assert(n == _n_completed_buffers,
         "Completed buffer length is wrong: counted: " SIZE_FORMAT
         ", expected: " SIZE_FORMAT, n, _n_completed_buffers);
}

#endif // ASSERT

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    // Destination list empty: adopt src's list wholesale (possibly
    // also empty, leaving both head and tail NULL).
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well formedness");
    if (src->_completed_buffers_head != NULL) {
      // Splice src's list onto our tail.
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  // Leave src empty; its buffers now belong to this set.
  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;
  src->_process_completed_buffers = false;

  // Head and tail are either both NULL or both non-NULL.
  // (Relies on && binding tighter than ||.)
  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
         "Sanity");
  assert_completed_buffers_list_len_correct_locked();
}

// If the completed-buffer count exceeds the processing threshold,
// mark the list for processing and (when configured to do so) wake a
// waiting processing thread.
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers > _process_completed_buffers_threshold) {
    _process_completed_buffers = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}