/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/globalCounter.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool active) :
  _qset(qset),
  _active(active),
  _index(0),
  _capacity_in_bytes(index_to_byte_index(qset->buffer_size())),
  _buf(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_buf == NULL, "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
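      // An empty buffer carries nothing for consumers to process, so it
      // goes back to the allocator rather than onto the completed list.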
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_completed_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}

void PtrQueue::enqueue_known_active(void* ptr) {
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  if (_buf != NULL) {
    handle_completed_buffer();
  } else {
    allocate_buffer();
  }
}

void PtrQueue::allocate_buffer() {
  _buf = qset()->allocate_buffer();
  reset();
}

void PtrQueue::enqueue_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  qset()->enqueue_completed_buffer(node);
  allocate_buffer();
}

BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

BufferNode::Allocator::Allocator(const char* name, size_t buffer_size) :
  _buffer_size(buffer_size),
  _pending_list(),
  _free_list(),
  _pending_count(0),
  _free_count(0),
  _transfer_lock(false)
{
  strncpy(_name, name, sizeof(_name) - 1);
  _name[sizeof(_name) - 1] = '\0';
}

BufferNode::Allocator::~Allocator() {
  delete_list(_free_list.pop_all());
  delete_list(_pending_list.pop_all());
}

void BufferNode::Allocator::delete_list(BufferNode* list) {
  while (list != NULL) {
    BufferNode* next = list->next();
    DEBUG_ONLY(list->set_next(NULL);)
    BufferNode::deallocate(list);
    list = next;
  }
}

size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node;
  {
    // Protect against ABA; see release().
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _free_list.pop();
  }
  if (node == NULL) {
    node = BufferNode::allocate(_buffer_size);
  } else {
    // Decrement count after getting buffer from free list.  This, along
    // with incrementing count before adding to free list, ensures count
    // never underflows.
    size_t count = Atomic::sub(&_free_count, 1u);
    assert((count + 1) != 0, "_free_count underflow");
  }
  return node;
}

// To solve the ABA problem for lock-free stack pop, allocate does the
// pop inside a critical section, and release synchronizes on the
// critical sections before adding to the _free_list.  But we don't
// want to make every release have to do a synchronize.  Instead, we
// initially place released nodes on the _pending_list, and transfer
// them to the _free_list in batches.  Only one transfer at a time is
// permitted, with a lock bit to control access to that phase.  A
// transfer takes all the nodes from the _pending_list, synchronizes on
// the _free_list pops, and then adds the former pending nodes to the
// _free_list.  While that's happening, other threads might be adding
// other nodes to the _pending_list, to be dealt with by some later
// transfer.
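//
// One possible interleaving, for illustration (a sketch, not an
// exhaustive argument):
//   T1: allocate() enters a GlobalCounter critical section and begins
//       a pop, having read the current top of _free_list.
//   T2: completes a pop of that same node and later release()s it; the
//       node goes onto _pending_list, not straight back to _free_list.
//   T2: a transfer claims the pending nodes, then write_synchronize()
//       blocks until T1's critical section has exited.
//   T2: only then are the nodes prepended to _free_list, so T1's
//       in-progress pop can never compare-and-swap against a node that
//       was popped, recycled, and reinserted underneath it.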
void BufferNode::Allocator::release(BufferNode* node) {
  assert(node != NULL, "precondition");
  assert(node->next() == NULL, "precondition");

  // Desired minimum transfer batch size.  There is relatively little
  // importance to the specific number.  It shouldn't be too big, else
  // we're wasting space when the release rate is low.  If the release
  // rate is high, we might accumulate more than this before being
  // able to start a new transfer, but that's okay.  Also note that
  // the allocation rate and the release rate are going to be fairly
  // similar, due to how the buffers are used.
  const size_t trigger_transfer = 10;

  // Add to pending list.  Update count first so no underflow in transfer.
  size_t pending_count = Atomic::add(&_pending_count, 1u);
  _pending_list.push(*node);
  if (pending_count > trigger_transfer) {
    try_transfer_pending();
  }
}

// Try to transfer nodes from _pending_list to _free_list, with a
// synchronization delay for any in-progress pops from the _free_list,
// to solve ABA there.  Return true if performed a (possibly empty)
// transfer, false if blocked from doing so by some other thread's
// in-progress transfer.
bool BufferNode::Allocator::try_transfer_pending() {
  // Attempt to claim the lock.
  if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
      Atomic::cmpxchg(&_transfer_lock, false, true)) {
    return false;
  }
  // Have the lock; perform the transfer.

  // Claim all the pending nodes.
  BufferNode* first = _pending_list.pop_all();
  if (first != NULL) {
    // Prepare to add the claimed nodes, and update _pending_count.
    BufferNode* last = first;
    size_t count = 1;
    for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
      last = next;
      ++count;
    }
    Atomic::sub(&_pending_count, count);

    // Wait for any in-progress pops, to avoid ABA for them.
    GlobalCounter::write_synchronize();

    // Add synchronized nodes to _free_list.
    // Update count first so no underflow in allocate().
    Atomic::add(&_free_count, count);
    _free_list.prepend(*first, *last);
    log_trace(gc, ptrqueue, freelist)
             ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
  }
  Atomic::release_store(&_transfer_lock, false);
  return true;
}

size_t BufferNode::Allocator::reduce_free_list(size_t remove_goal) {
  try_transfer_pending();
  size_t removed = 0;
  for ( ; removed < remove_goal; ++removed) {
    BufferNode* node = _free_list.pop();
    if (node == NULL) break;
    BufferNode::deallocate(node);
  }
  size_t new_count = Atomic::sub(&_free_count, removed);
  log_debug(gc, ptrqueue, freelist)
           ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
            name(), removed, new_count);
  return removed;
}

PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
  _allocator(allocator),
  _all_active(false)
{}

PtrQueueSet::~PtrQueueSet() {}

void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}

void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  _allocator->release(node);
}
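
// Illustrative usage sketch of the BufferNode::Allocator protocol
// (hypothetical client code, not part of this file; the name
// "Example Buffer Allocator" and the SIZE_MAX goal are assumptions):
//
//   BufferNode::Allocator allocator("Example Buffer Allocator", 256);
//   BufferNode* node = allocator.allocate();  // free list, else C heap
//   void** buf = BufferNode::make_buffer_from_node(node);
//   // ... fill buf and process it ...
//   allocator.release(node);                  // onto _pending_list
//   allocator.reduce_free_list(SIZE_MAX);     // trim cached free nodes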