/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

// A PtrQueue starts with no buffer; one is obtained lazily from the qset
// on the first enqueue (see handle_zero_index).  _index is a byte offset
// into the buffer, so 0 means both "no buffer yet" and "buffer full".
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

// Hand the current buffer (if any) back to the qset: an empty buffer is
// returned to the free list, a non-empty one is enqueued as a completed
// buffer for later processing.  Leaves the queue bufferless.
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}


// Record ptr in the buffer.  The buffer is filled from high addresses
// down toward index 0; each enqueue decrements _index by one element.
void PtrQueue::enqueue_known_active(void* ptr) {
  // _index == 0 means no buffer or a full buffer; handle_zero_index
  // installs a buffer with room (it may recycle the current one, hence
  // the loop rather than a single call).
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

// Allocate a node whose buffer of "size" pointer slots is carved from
// the same C-heap block as the header; placement-new constructs the
// BufferNode at the front of the block.
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();          // constructed via placement new, so destroy explicitly
  FREE_C_HEAP_ARRAY(char, node);
}

BufferNode::Allocator::Allocator(size_t buffer_size, Mutex* lock) :
  _buffer_size(buffer_size),
  _lock(lock),
  _free_list(NULL),
  _free_count(0)
{
  assert(lock != NULL, "precondition");
}

// Drain and free the entire free list; no locking — assumes no
// concurrent users remain at destruction time.
BufferNode::Allocator::~Allocator() {
  while (_free_list != NULL) {
    BufferNode* node = _free_list;
    _free_list = node->next();
    BufferNode::deallocate(node);
  }
}

// Lock-free read of the free-list length; writers update it under _lock.
size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

// Take a node from the free list if one is available; otherwise allocate
// a fresh one.  The C-heap allocation is performed outside the lock.
BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    node = _free_list;
    if (node != NULL) {
      _free_list = node->next();
      --_free_count;
      node->set_next(NULL);
      node->set_index(0);       // recycled nodes are handed out "full"/reset
      return node;
    }
  }
  return BufferNode::allocate(_buffer_size);
}

// Push node onto the free list for reuse.
void BufferNode::Allocator::release(BufferNode* node) {
  MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_free_list);
  _free_list = node;
  ++_free_count;
}

// Trim the free list: unlink a prefix of the list under the lock, then
// free the detached nodes outside the lock to keep the critical section
// short.
void BufferNode::Allocator::reduce_free_list() {
  BufferNode* head = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    // For now, delete half.
    size_t remove = _free_count / 2;
    if (remove > 0) {
      head = _free_list;
      BufferNode* tail = head;
      BufferNode* prev = NULL;
      for (size_t i = 0; i < remove; ++i) {
        assert(tail != NULL, "free list size is wrong");
        prev = tail;
        tail = tail->next();
      }
      assert(prev != NULL, "invariant");
      assert(prev->next() == tail, "invariant");
      prev->set_next(NULL);     // detach the first "remove" nodes, headed by "head"
      _free_list = tail;
      _free_count -= remove;
    }
  }
  while (head != NULL) {
    BufferNode* next = head->next();
    BufferNode::deallocate(head);
    head = next;
  }
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _allocator(NULL),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed(false),
  _all_active(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),
  _completed_buffers_padding(0)
{}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

// Second-phase initialization; must be called before the set is used.
void PtrQueueSet::initialize(Monitor* cbl_mon,
                             BufferNode::Allocator* allocator) {
  assert(cbl_mon != NULL && allocator != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _allocator = allocator;
}

// Get a raw buffer (backed by a BufferNode) from the allocator.
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}

// Return a node to the allocator's free list.
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  _allocator->release(node);
}

// Called when _index == 0: either no buffer has been allocated yet, or
// the current buffer is full.  Retires (or recycles) the full buffer and
// leaves the queue with a buffer that has room, unless
// should_enqueue_buffer() chose to keep the current one.
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL;              // clear shared _buf field

      qset()->enqueue_complete_buffer(node);
      assert(_buf == NULL, "multiple enqueuers appear to be racing");
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

// Returns true if the buffer was processed in place by the mutator (the
// caller may then recycle it), false if it was added to the completed
// list (the caller must obtain a new buffer).
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself.  We don't lock to access
    // buffer count or padding; it is fine to be imprecise here.  The
    // add of padding could overflow, which is treated as unlimited.
    size_t limit = _max_completed_buffers + _completed_buffers_padding;
    if ((_n_completed_buffers > limit) && (limit >= _max_completed_buffers)) {
      if (mut_process_buffer(node)) {
        // Successfully processed; return true to allow buffer reuse.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

// Append cbn to the completed-buffer list.  When the threshold is first
// crossed, set _process_completed and optionally wake a waiter on
// _cbl_mon.
void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed &&
      (_n_completed_buffers > _process_completed_buffers_threshold)) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

// Count the completed-buffer list by walking it (debug checking only;
// caller must hold _cbl_mon).
size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

// Verify the cached count matches the actual list length.
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

// Merge lists of buffers. Notify the processing threads.
297 // The source queue is emptied as a result. The queues 298 // must share the monitor. 299 void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) { 300 assert(_cbl_mon == src->_cbl_mon, "Should share the same lock"); 301 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); 302 if (_completed_buffers_tail == NULL) { 303 assert(_completed_buffers_head == NULL, "Well-formedness"); 304 _completed_buffers_head = src->_completed_buffers_head; 305 _completed_buffers_tail = src->_completed_buffers_tail; 306 } else { 307 assert(_completed_buffers_head != NULL, "Well formedness"); 308 if (src->_completed_buffers_head != NULL) { 309 _completed_buffers_tail->set_next(src->_completed_buffers_head); 310 _completed_buffers_tail = src->_completed_buffers_tail; 311 } 312 } 313 _n_completed_buffers += src->_n_completed_buffers; 314 315 src->_n_completed_buffers = 0; 316 src->_completed_buffers_head = NULL; 317 src->_completed_buffers_tail = NULL; 318 319 assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL || 320 _completed_buffers_head != NULL && _completed_buffers_tail != NULL, 321 "Sanity"); 322 } 323 324 void PtrQueueSet::notify_if_necessary() { 325 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); 326 if (_n_completed_buffers > _process_completed_buffers_threshold) { 327 _process_completed = true; 328 if (_notify_when_complete) 329 _cbl_mon->notify(); 330 } 331 }