/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP

#include "memory/allocation.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses. For example, a generational write barrier might log
// the addresses of modified old-generation objects. This type supports
// that operation.

// Placement operator new(size_t, void*) is defined in <new>.
#include <new>

class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

protected:
  // The ptr queue set to which this queue belongs.
  PtrQueueSet* _qset;

  // Whether updates should be logged.
  bool _active;

  // The buffer.
  void** _buf;
  // The index at which an object was last enqueued. Starts at "_sz"
  // (indicating an empty buffer) and goes towards zero.
  size_t _index;

  // The size of the buffer.
  size_t _sz;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  bool _perm;

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }

public:
  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  // Release any contained resources.
  virtual void flush();
  // Calls flush() when destroyed.
  ~PtrQueue() { flush(); }

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  void reset() { if (_buf != NULL) _index = _sz; }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // This method is called when we're doing the zero index handling
  // and gives a chance to the queues to do any pre-enqueueing
  // processing they might want to do on the buffer. It should return
  // true if the buffer should be enqueued, or false if enough
  // entries were cleared from it so that it can be re-used. It should
  // not return false if the buffer is still full (otherwise we can
  // get into an infinite loop).
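  //
  // Illustrative sketch only (not part of this class): a filtering subclass
  // might override should_enqueue_buffer() roughly as follows; the helper
  // filter_entries() is hypothetical.
  //
  //   bool MyFilteringQueue::should_enqueue_buffer() {
  //     filter_entries();            // drop entries that are no longer
  //                                  // needed, moving _index towards _sz
  //     // Enqueue only if the buffer is still more than half full;
  //     // a completely full buffer (_index == 0) always returns true.
  //     return _index < _sz / 2;
  //   }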
  virtual bool should_enqueue_buffer() { return true; }
  void handle_zero_index();
  void locking_enqueue_completed_buffer(void** buf);

  void enqueue_known_active(void* ptr);

  size_t size() {
    assert(_sz >= _index, "Invariant.");
    return _buf == NULL ? 0 : _sz - _index;
  }

  bool is_empty() {
    return _buf == NULL || _sz == _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive queue is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      _index = _sz;
    } else if (b && _buf != NULL) {
      assert(_index == _sz, "invariant: queues are empty when activated.");
    }
  }

  bool is_active() { return _active; }

  static int byte_index_to_index(int ind) {
    assert((ind % oopSize) == 0, "Invariant.");
    return ind / oopSize;
  }

  static int index_to_byte_index(int byte_ind) {
    return byte_ind * oopSize;
  }

  // To support compiler.
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(PtrQueue, _index);
  }
  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(PtrQueue, _buf);
  }
  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }

  static ByteSize byte_offset_of_active() {
    return byte_offset_of(PtrQueue, _active);
  }
  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
public:
  BufferNode() : _index(0), _next(NULL) { }
  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i;    }

  // Align the size of the structure to the size of a pointer.
  static size_t aligned_size() {
    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
    return alignment;
  }

  // BufferNode is allocated before the buffer.
  // The chunk of memory that holds both of them is a block.

  // Produce a new BufferNode given a buffer.
  static BufferNode* new_from_buffer(void** buf) {
    return new (make_block_from_buffer(buf)) BufferNode;
  }

  // The following are the required conversion routines:
  static BufferNode* make_node_from_buffer(void** buf) {
    return (BufferNode*)make_block_from_buffer(buf);
  }
  static void** make_buffer_from_node(BufferNode *node) {
    return make_buffer_from_block(node);
  }
  static void* make_block_from_node(BufferNode *node) {
    return (void*)node;
  }
  static void** make_buffer_from_block(void* p) {
    return (void**)((char*)p + aligned_size());
  }
  static void* make_block_from_buffer(void** p) {
    return (void*)((char*)p - aligned_size());
  }
};

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  int _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the TLOQ_FL_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist. The _fl_owner variable
  // specifies the owner. It is set to "this" by default.
  PtrQueueSet* _fl_owner;

  // The size of all buffers in the set.
  size_t _sz;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself. Zero indicates no maximum.
  int _max_completed_queue;
  int _completed_queue_padding;

  int completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(void** buf) {
    ShouldNotReachHere();
    return false;
  }

public:
  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL) {
    _max_completed_queue = max_completed_queue;
    _process_completed_threshold = process_completed_threshold;
    _completed_queue_padding = 0;
    assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
    _cbl_mon = cbl_mon;
    _fl_lock = fl_lock;
    _fl_owner = (fl_owner != NULL) ? fl_owner : this;
  }

  // Return an empty oop array of size _sz (required to be non-zero).
  void** allocate_buffer();

  // Return an empty buffer to the free list. The "buf" argument is
  // required to be a pointer to the head of an array of length "_sz".
  void deallocate_buffer(void** buf);

  // Declares that "buf" is a complete buffer.
  void enqueue_complete_buffer(void** buf, size_t index = 0);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(void** buf);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size. Must be called before any "enqueue" operation,
  // and should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.
  size_t buffer_size() { return _sz; }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point. Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  int completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
  int completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
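
// Usage sketch (illustrative only, not part of this header): assumes a
// concrete PtrQueue/PtrQueueSet subclass pair, called MyQueue/MyQueueSet
// here, plus pre-existing "MyQueue_CBL_mon" and "MyQueue_FL_lock"
// synchronization objects; the values passed below are arbitrary examples.
//
//   MyQueueSet set;
//   set.set_buffer_size(256);                   // once, before any enqueue
//   set.initialize(MyQueue_CBL_mon, MyQueue_FL_lock,
//                  5,   // process_completed_threshold
//                  0);  // max_completed_queue: 0 means no maximum
//
//   MyQueue q(&set);            // a per-thread queue belonging to the set
//   q.set_active(true);         // enqueues are no-ops while inactive
//   q.enqueue(some_address);    // log an address; completed buffers are
//                               // handed back to the shared set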