/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/globalCounter.inline.hpp"

#include <new>

// A PtrQueue starts out without a buffer; one is obtained lazily from the
// qset on the first enqueue (see handle_zero_index).  _index == 0 together
// with _buf == NULL denotes that initial bufferless state.
PtrQueue::PtrQueue(PtrQueueSet* qset, bool active) :
  _qset(qset),
  _active(active),
  _index(0),
  _capacity_in_bytes(index_to_byte_index(qset->buffer_size())),
  _buf(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_buf == NULL, "queue must be flushed before delete");
}

// Hand the current buffer (if any) back to the qset and leave the queue in
// the bufferless state.  An empty buffer is simply recycled; a non-empty one
// is published as completed work for later processing.
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_completed_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}

// The buffer is filled from high indexes toward zero: _index is the byte
// offset of the next free slot, so _index == 0 means the buffer is full
// (or not yet allocated) and handle_zero_index must make room first.
void PtrQueue::enqueue_known_active(void* ptr) {
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

// Called when there is no room to enqueue: either no buffer has been
// allocated yet, or the current buffer is full and must be retired.
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  if (_buf != NULL) {
    handle_completed_buffer();
  } else {
    allocate_buffer();
  }
}

// Obtain a fresh buffer from the qset and reset the index.
// NOTE(review): reset() is declared elsewhere; presumably it sets _index
// back to capacity so the buffer fills top-down — confirm against the header.
void PtrQueue::allocate_buffer() {
  _buf = qset()->allocate_buffer();
  reset();
}

// Retire the current (full) buffer to the qset's completed-buffer list and
// immediately replace it with a fresh one.
void PtrQueue::enqueue_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  qset()->enqueue_completed_buffer(node);
  allocate_buffer();
}

// Allocate a BufferNode whose header and element storage share a single
// C-heap block: the node is placement-constructed at the front, and the
// buffer of 'size' pointers lives buffer_offset() bytes past it.
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

// Mirror of allocate(): run the destructor explicitly, then free the raw
// combined block.
void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

// 'name' is copied into a fixed-size internal array; names longer than
// sizeof(_name) - 1 are silently truncated, and the terminating NUL is
// forced because strncpy does not guarantee one on truncation.
BufferNode::Allocator::Allocator(const char* name, size_t buffer_size) :
  _buffer_size(buffer_size),
  _pending_list(),
  _free_list(),
  _pending_count(0),
  _free_count(0),
  _transfer_lock(false)
{
  strncpy(_name, name, sizeof(_name) - 1);
  _name[sizeof(_name) - 1] = '\0';
}

// Destruction assumes no concurrent allocate/release; both lists are
// drained and their nodes freed.
BufferNode::Allocator::~Allocator() {
  delete_list(_free_list.pop_all());
  delete_list(_pending_list.pop_all());
}

// Free every node on a singly-linked list of BufferNodes.
void BufferNode::Allocator::delete_list(BufferNode* list) {
  while (list != NULL) {
    BufferNode* next = list->next();
    DEBUG_ONLY(list->set_next(NULL);)
    BufferNode::deallocate(list);
    list = next;
  }
}

// Atomic snapshot of the free-list count; may be stale by the time the
// caller uses it.
size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

// Pop a node from the free list, or heap-allocate a new one if the free
// list is empty.
BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node;
  {
    // Protect against ABA; see release().
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _free_list.pop();
  }
  if (node == NULL) {
    node = BufferNode::allocate(_buffer_size);
  } else {
    // Decrement count after getting buffer from free list.  This, along
    // with incrementing count before adding to free list, ensures count
    // never underflows.
    size_t count = Atomic::sub(&_free_count, 1u);
    assert((count + 1) != 0, "_free_count underflow");
  }
  return node;
}

// To solve the ABA problem for lock-free stack pop, allocate does the
// pop inside a critical section, and release synchronizes on the
// critical sections before adding to the _free_list.  But we don't
// want to make every release have to do a synchronize.  Instead, we
// initially place released nodes on the _pending_list, and transfer
// them to the _free_list in batches.  Only one transfer at a time is
// permitted, with a lock bit to control access to that phase.  A
// transfer takes all the nodes from the _pending_list, synchronizes on
// the _free_list pops, and then adds the former pending nodes to the
// _free_list.  While that's happening, other threads might be adding
// other nodes to the _pending_list, to be dealt with by some later
// transfer.
void BufferNode::Allocator::release(BufferNode* node) {
  assert(node != NULL, "precondition");
  assert(node->next() == NULL, "precondition");

  // Desired minimum transfer batch size.  There is relatively little
  // importance to the specific number.  It shouldn't be too big, else
  // we're wasting space when the release rate is low.  If the release
  // rate is high, we might accumulate more than this before being
  // able to start a new transfer, but that's okay.  Also note that
  // the allocation rate and the release rate are going to be fairly
  // similar, due to how the buffers are used.
  const size_t trigger_transfer = 10;

  // Add to pending list. Update count first so no underflow in transfer.
  size_t pending_count = Atomic::add(&_pending_count, 1u);
  _pending_list.push(*node);
  if (pending_count > trigger_transfer) {
    try_transfer_pending();
  }
}

// Try to transfer nodes from _pending_list to _free_list, with a
// synchronization delay for any in-progress pops from the _free_list,
// to solve ABA there.  Return true if performed a (possibly empty)
// transfer, false if blocked from doing so by some other thread's
// in-progress transfer.
bool BufferNode::Allocator::try_transfer_pending() {
  // Attempt to claim the lock.
  if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
      Atomic::cmpxchg(&_transfer_lock, false, true)) {
    return false;
  }
  // Have the lock; perform the transfer.

  // Claim all the pending nodes.
  BufferNode* first = _pending_list.pop_all();
  if (first != NULL) {
    // Prepare to add the claimed nodes, and update _pending_count.
    BufferNode* last = first;
    size_t count = 1;
    for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
      last = next;
      ++count;
    }
    Atomic::sub(&_pending_count, count);

    // Wait for any in-progress pops, to avoid ABA for them.
    GlobalCounter::write_synchronize();

    // Add synchronized nodes to _free_list.
    // Update count first so no underflow in allocate().
    Atomic::add(&_free_count, count);
    _free_list.prepend(*first, *last);
    log_trace(gc, ptrqueue, freelist)
             ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
  }
  // Release the lock; the store pairs with the CAS/load above.
  Atomic::release_store(&_transfer_lock, false);
  return true;
}

// Best-effort trim of the free list: pop and free up to remove_goal nodes,
// stopping early if the list runs dry.  Returns the number actually removed.
size_t BufferNode::Allocator::reduce_free_list(size_t remove_goal) {
  try_transfer_pending();
  size_t removed = 0;
  for ( ; removed < remove_goal; ++removed) {
    BufferNode* node = _free_list.pop();
    if (node == NULL) break;
    BufferNode::deallocate(node);
  }
  size_t new_count = Atomic::sub(&_free_count, removed);
  log_debug(gc, ptrqueue, freelist)
           ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
            name(), removed, new_count);
  return removed;
}

PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
  _allocator(allocator),
  _all_active(false)
{}

PtrQueueSet::~PtrQueueSet() {}

// Obtain a buffer (element array) from the shared allocator.
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}

// Return a buffer to the shared allocator's pending list.
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  _allocator->release(node);
}