/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
#define SHARE_GC_SHARED_PTRQUEUE_HPP

#include "memory/padded.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/lockFreeStack.hpp"
#include "utilities/macros.hpp"
#include "utilities/sizes.hpp"

class Mutex;
class Monitor;

// There are various techniques that require threads to be able to log
// addresses. For example, a generational write barrier might log
// the addresses of modified old-generation objects. This type supports
// this operation.

class BufferNode;
class PtrQueueSet;
class PtrQueue {
  friend class VMStructs;

  NONCOPYABLE(PtrQueue);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // The (byte) index at which an object was last enqueued. Starts at
  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  // Value is always pointer-size aligned.
  size_t _index;

  // Size of the current buffer, in bytes.
  // Value is always pointer-size aligned.
  size_t _capacity_in_bytes;

  static const size_t _element_size = sizeof(void*);

  // Get the capacity, in bytes. The capacity must have been set.
  size_t capacity_in_bytes() const {
    assert(_capacity_in_bytes > 0, "capacity not set");
    return _capacity_in_bytes;
  }

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }

protected:
  // The buffer.
  void** _buf;

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= capacity_in_bytes(), "precondition");
    _index = byte_index;
  }

  size_t capacity() const {
    return byte_index_to_index(capacity_in_bytes());
  }
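
  // A worked example of the byte-index bookkeeping above (the store order
  // is that of enqueue_known_active() in ptrQueue.cpp): with 8-byte
  // pointers, a 32-element buffer has capacity_in_bytes() == 256. An empty
  // queue has _index == 256; each enqueue decrements _index by
  // _element_size before storing, so after one enqueue _index == 248 and
  // the element is at _buf[31]. _index == 0 means the buffer is full, and
  // the next enqueue triggers handle_zero_index().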

  PtrQueueSet* qset() const { return _qset; }

  // Process queue entries and release resources.
  void flush_impl();

  // Process (some of) the buffer and leave it in place for further use,
  // or enqueue the buffer and allocate a new one.
  virtual void handle_completed_buffer() = 0;

  void allocate_buffer();

  // Enqueue the current buffer in the qset and allocate a new buffer.
  void enqueue_completed_buffer();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool active = false);

  // Requires queue flushed.
  ~PtrQueue();

public:

  // Forcibly set empty.
  void reset() {
    if (_buf != NULL) {
      _index = capacity_in_bytes();
    }
  }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (_active) {
      enqueue_known_active(ptr);
    }
  }

  void handle_zero_index();

  void enqueue_known_active(void* ptr);

  // Return the size of the in-use region.
  size_t size() const {
    size_t result = 0;
    if (_buf != NULL) {
      assert(_index <= capacity_in_bytes(), "Invariant");
      result = byte_index_to_index(capacity_in_bytes() - _index);
    }
    return result;
  }

  bool is_empty() const {
    return _buf == NULL || capacity_in_bytes() == _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive queue is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      reset();
    } else if (b && _buf != NULL) {
      assert(index() == capacity(),
             "invariant: queues are empty when activated.");
    }
  }

  bool is_active() const { return _active; }

  // To support the compiler: offsets and widths of the fields above, for
  // use in generated code.

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
};

class BufferNode {
  size_t _index;
  BufferNode* volatile _next;
  void* _buffer[1];             // Pseudo flexible array member.

  BufferNode() : _index(0), _next(NULL) { }
  ~BufferNode() { }

  NONCOPYABLE(BufferNode);

  static size_t buffer_offset() {
    return offset_of(BufferNode, _buffer);
  }

  static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }

  // Allocate a new BufferNode with the "buffer" having size elements.
  static BufferNode* allocate(size_t size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

public:
  typedef LockFreeStack<BufferNode, &next_ptr> Stack;

  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i; }

  // Return the BufferNode containing the buffer, after setting its index.
  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
    BufferNode* node =
      reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
    node->set_index(index);
    return node;
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }
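
  // Layout sketch: a node allocated for N elements is a single contiguous
  // block,
  //
  //   [ _index | _next | _buffer[0] ... _buffer[N-1] ]
  //
  // so the two conversions above are inverse adjustments by
  // buffer_offset(). For any live node (e.g. one obtained from a
  // BufferNode::Allocator):
  //
  //   void** buf = BufferNode::make_buffer_from_node(node);
  //   assert(BufferNode::make_node_from_buffer(buf, 0) == node,
  //          "round trip (and the node's index is now 0)");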

  class Allocator;              // Free-list based allocator.
  class TestSupport;            // Unit test support.
};

// Allocation is based on a lock-free free list of nodes, linked through
// BufferNode::_next (see BufferNode::Stack). To solve the ABA problem,
// popping a node from the free list is performed within a GlobalCounter
// critical section, and pushing nodes onto the free list is done after
// a GlobalCounter synchronization associated with the nodes to be pushed.
// This is documented behavior so that other parts of the node life-cycle
// can depend on and make use of it too.
class BufferNode::Allocator {
  friend class TestSupport;

  // Since we don't expect many instances, and measured >15% speedup
  // on stress gtest, padding seems like a good tradeoff here.
#define DECLARE_PADDED_MEMBER(Id, Type, Name) \
  Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_CACHE_LINE_SIZE, sizeof(Type))

  const size_t _buffer_size;
  char _name[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)]; // Use name as padding.
  DECLARE_PADDED_MEMBER(1, Stack, _pending_list);
  DECLARE_PADDED_MEMBER(2, Stack, _free_list);
  DECLARE_PADDED_MEMBER(3, volatile size_t, _pending_count);
  DECLARE_PADDED_MEMBER(4, volatile size_t, _free_count);
  DECLARE_PADDED_MEMBER(5, volatile bool, _transfer_lock);

#undef DECLARE_PADDED_MEMBER

  void delete_list(BufferNode* list);
  bool try_transfer_pending();

  NONCOPYABLE(Allocator);

public:
  Allocator(const char* name, size_t buffer_size);
  ~Allocator();

  const char* name() const { return _name; }
  size_t buffer_size() const { return _buffer_size; }
  size_t free_count() const;
  BufferNode* allocate();
  void release(BufferNode* node);

  // Deallocate some of the available buffers. remove_goal is the target
  // number to remove. Returns the number actually deallocated, which may
  // be less than the goal if there were fewer available.
  size_t reduce_free_list(size_t remove_goal);
};
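
// A sketch of the intended node life-cycle (the names below are
// illustrative, not part of this header):
//
//   BufferNode::Allocator alloc("Example Buffer Allocator", 256);
//   BufferNode* node = alloc.allocate();  // pop from the free list, or a
//                                         // fresh node if the list is empty
//   void** buf = BufferNode::make_buffer_from_node(node);
//   // ... the mutator fills buf; a PtrQueueSet later processes it ...
//   alloc.release(node);                  // deferred push for reuse
//
// release() parks nodes on _pending_list; they move to _free_list only
// after the GlobalCounter synchronization described above, which is what
// makes the critical-section pop in allocate() safe against ABA.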

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
class PtrQueueSet {
  BufferNode::Allocator* _allocator;

  NONCOPYABLE(PtrQueueSet);

protected:
  bool _all_active;

  // Create an empty ptr queue set.
  PtrQueueSet(BufferNode::Allocator* allocator);
  ~PtrQueueSet();

public:

  // Return the associated BufferNode allocator.
  BufferNode::Allocator* allocator() const { return _allocator; }

  // Return the buffer for a BufferNode of size buffer_size().
  void** allocate_buffer();

  // Return an empty buffer to the free list. The node is required
  // to have been allocated with a size of buffer_size().
  void deallocate_buffer(BufferNode* node);

  // A completed buffer is a buffer the mutator is finished with, and
  // is ready to be processed by the collector. It need not be full.

  // Adds node to the completed buffer list. A minimal concrete subclass
  // is sketched at the end of this file.
  virtual void enqueue_completed_buffer(BufferNode* node) = 0;

  bool is_active() { return _all_active; }

  size_t buffer_size() const {
    return _allocator->buffer_size();
  }
};

#endif // SHARE_GC_SHARED_PTRQUEUE_HPP
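
// Sketch of a minimal concrete PtrQueueSet (illustrative only; real
// subclasses, e.g. G1's dirty card queue set, also track counts and
// notify processing threads):
//
//   class SimplePtrQueueSet : public PtrQueueSet {
//     BufferNode::Stack _completed;  // hypothetical completed-buffer list
//   public:
//     SimplePtrQueueSet(BufferNode::Allocator* a) :
//       PtrQueueSet(a), _completed() {}
//     virtual void enqueue_completed_buffer(BufferNode* node) {
//       _completed.push(*node);      // publish for later processing
//     }
//   };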