/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_G1_PTRQUEUE_HPP

#include "memory/allocation.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.

class BufferNode;
class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  // Noncopyable - not defined.
  PtrQueue(const PtrQueue&);
  PtrQueue& operator=(const PtrQueue&);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  const bool _permanent;

  // The (byte) index at which an object was last enqueued.  Starts at
  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  // Value is always pointer-size aligned.
  size_t _index;

  // Size of the current buffer, in bytes.
  // Value is always pointer-size aligned.
  size_t _capacity_in_bytes;

  static const size_t _element_size = sizeof(void*);

  // Get the capacity, in bytes.  The capacity must have been set.
  size_t capacity_in_bytes() const {
    assert(_capacity_in_bytes > 0, "capacity not set");
    return _capacity_in_bytes;
  }

  void set_capacity(size_t entries) {
    size_t byte_capacity = index_to_byte_index(entries);
    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
           _capacity_in_bytes, byte_capacity);
    _capacity_in_bytes = byte_capacity;
  }

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }

protected:
  // The buffer.
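  // The buffer fills from high indices toward low ones: slots
  // [index(), capacity()) hold enqueued entries and slots [0, index()) are
  // free.  Illustration (assuming 256-entry buffers on a 64-bit VM):
  // capacity_in_bytes() is 2048, the first enqueue stores its pointer at
  // byte index 2040 (i.e. _buf[255]), and _index reaches 0 when the buffer
  // is full.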
  void** _buf;

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= capacity_in_bytes(), "precondition");
    _index = byte_index;
  }

  size_t capacity() const {
    return byte_index_to_index(capacity_in_bytes());
  }

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }
  bool is_permanent() const { return _permanent; }

  // Process queue entries and release resources.
  void flush_impl();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);

  // Requires queue flushed or permanent.
  ~PtrQueue();

public:

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Forcibly set empty.
  void reset() {
    if (_buf != NULL) {
      _index = capacity_in_bytes();
    }
  }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // Called during zero-index handling, giving the queue a chance to do any
  // pre-enqueueing processing on the buffer.  It should return true if the
  // buffer should be enqueued, or false if enough entries were cleared from
  // it that it can be reused.  It must not return false if the buffer is
  // still full (otherwise we can get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
  void handle_zero_index();
  void locking_enqueue_completed_buffer(BufferNode* node);

  void enqueue_known_active(void* ptr);

  // Return the size of the in-use region.
  size_t size() const {
    size_t result = 0;
    if (_buf != NULL) {
      assert(_index <= capacity_in_bytes(), "Invariant");
      result = byte_index_to_index(capacity_in_bytes() - _index);
    }
    return result;
  }

  bool is_empty() const {
    return _buf == NULL || capacity_in_bytes() == _index;
  }

  // Set the "active" property of the queue to "b".  An enqueue to an
  // inactive queue is a no-op.  Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      reset();
    } else if (b && _buf != NULL) {
      assert(index() == capacity(),
             "invariant: queues are empty when activated.");
    }
  }

  bool is_active() const { return _active; }

  // To support compiler.
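  //
  // The accessors below report the offsets and widths of _index, _buf and
  // _active so that compiler-generated write-barrier code can load and store
  // those fields directly instead of calling into the VM.  A queue subclass
  // would typically re-export them with itself as the Derived type, along
  // the lines of this sketch ("SomeQueue" is a hypothetical subclass):
  //
  //   class SomeQueue : public PtrQueue {
  //   public:
  //     static ByteSize byte_offset_of_index() {
  //       return PtrQueue::byte_offset_of_index<SomeQueue>();
  //     }
  //   };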

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
  void* _buffer[1];             // Pseudo flexible array member.

  BufferNode() : _index(0), _next(NULL) { }
  ~BufferNode() { }

  static size_t buffer_offset() {
    return offset_of(BufferNode, _buffer);
  }

public:
  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i;    }

  // Allocate a new BufferNode with the "buffer" having size elements.
  static BufferNode* allocate(size_t size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

  // Return the BufferNode containing the buffer, after setting its index.
  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
    BufferNode* node =
      reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
    node->set_index(index);
    return node;
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }
};
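
// The BufferNode header and its buffer storage are one allocation, so
// converting between a node and its buffer is plain pointer arithmetic.
// Illustrative round trip (a sketch, not part of this interface):
//
//   void** buf = BufferNode::make_buffer_from_node(node);
//   assert(BufferNode::make_node_from_buffer(buf, node->index()) == node,
//          "buffer <-> node conversions must be inverses");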

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// The shared state below is protected by _cbl_mon (completed-buffer list)
// and _fl_lock (buffer free list).
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
private:
  // The size of all buffers in the set.
  size_t _buffer_size;

protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  size_t _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by _fl_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist.  The _fl_owner variable
  // specifies the owner; it is set to "this" by default.
  PtrQueueSet* _fl_owner;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on the completed queue: after that,
  // the enqueuer does the work itself.  Zero indicates no maximum.
  int _max_completed_queue;
  size_t _completed_queue_padding;

  size_t completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(BufferNode* node) {
    ShouldNotReachHere();
    return false;
  }

  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);
  ~PtrQueueSet();

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon,
                  Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL);

public:

  // Return the buffer for a BufferNode of size buffer_size().
  void** allocate_buffer();

  // Return an empty buffer to the free list.  The node is required
  // to have been allocated with a size of buffer_size().
  void deallocate_buffer(BufferNode* node);

  // Declares that the buffer of "node" is complete.
  void enqueue_complete_buffer(BufferNode* node);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(BufferNode* node);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size.  Must be called before any "enqueue" operation,
  // and only once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.  Must have been set.
  size_t buffer_size() const {
    assert(_buffer_size > 0, "buffer size not set");
    return _buffer_size;
  }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point.  Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  size_t completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
  size_t completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of completed buffers has crossed the
  // threshold.
  void notify_if_necessary();
};

#endif // SHARE_VM_GC_G1_PTRQUEUE_HPP