/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
#define SHARE_GC_SHARED_PTRQUEUE_HPP

#include "utilities/align.hpp"
#include "utilities/sizes.hpp"

class Mutex;

// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.
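//
// As an illustrative sketch only (post_write_barrier and in_old_gen are
// hypothetical names, not part of this file), a generational post-write
// barrier might use a PtrQueue like this:
//
//   void post_write_barrier(PtrQueue& q, void* field_addr) {
//     if (in_old_gen(field_addr)) {  // hypothetical predicate
//       q.enqueue(field_addr);       // no-op if the queue is inactive
//     }
//   }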

class BufferNode;
class PtrQueueSet;
class PtrQueue {
  friend class VMStructs;

  // Noncopyable - not defined.
  PtrQueue(const PtrQueue&);
  PtrQueue& operator=(const PtrQueue&);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  const bool _permanent;

  // The (byte) index at which an object was last enqueued.  Starts at
  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  // Value is always pointer-size aligned.
  size_t _index;

  // Size of the current buffer, in bytes.
  // Value is always pointer-size aligned.
  size_t _capacity_in_bytes;

  static const size_t _element_size = sizeof(void*);

  // Get the capacity, in bytes.  The capacity must have been set.
  size_t capacity_in_bytes() const {
    assert(_capacity_in_bytes > 0, "capacity not set");
    return _capacity_in_bytes;
  }

  void set_capacity(size_t entries) {
    size_t byte_capacity = index_to_byte_index(entries);
    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
           _capacity_in_bytes, byte_capacity);
    _capacity_in_bytes = byte_capacity;
  }

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }
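
  // Worked example (assuming a 64-bit platform, so _element_size == 8):
  // a buffer of 256 entries has capacity_in_bytes() == 2048.  An empty
  // queue has _index == 2048; after one enqueue, _index == 2040 and the
  // entry sits at _buf[byte_index_to_index(2040)] == _buf[255].  The
  // buffer fills from the highest slot down, until _index reaches 0.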

protected:
  // The buffer.
  void** _buf;

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= capacity_in_bytes(), "precondition");
    _index = byte_index;
  }

  size_t capacity() const {
    return byte_index_to_index(capacity_in_bytes());
  }

  // If there is a lock associated with this queue, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }
  bool is_permanent() const { return _permanent; }

  // Process queue entries and release resources.
  void flush_impl();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);

  // Requires queue flushed or permanent.
  ~PtrQueue();

public:

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Forcibly set empty.
  void reset() {
    if (_buf != NULL) {
      _index = capacity_in_bytes();
    }
  }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr", unless the queue is inactive.
  void enqueue(void* ptr) {
    if (_active) {
      enqueue_known_active(ptr);
    }
  }

  // Called during zero-index handling, giving the queue a chance to do
  // any pre-enqueueing processing it might want to do on the buffer.
  // Returns true if the buffer should be enqueued, or false if enough
  // entries were cleared from it that it can be reused.  It must not
  // return false while the buffer is still full (otherwise we could get
  // into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
  void handle_zero_index();

  void enqueue_known_active(void* ptr);
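
  // A minimal sketch of the enqueue fast path (the real definition lives
  // in ptrQueue.cpp; treat this as an assumption about its shape, not a
  // quotation of it):
  //
  //   while (_index == 0) handle_zero_index();  // full: hand off or refill
  //   _index -= _element_size;                  // indices grow downward
  //   _buf[index()] = ptr;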

  // Return the size of the in-use region.
  size_t size() const {
    size_t result = 0;
    if (_buf != NULL) {
      assert(_index <= capacity_in_bytes(), "Invariant");
      result = byte_index_to_index(capacity_in_bytes() - _index);
    }
    return result;
  }

  bool is_empty() const {
    return _buf == NULL || capacity_in_bytes() == _index;
  }

  // Set the "active" property of the queue to "b".  An enqueue to an
  // inactive queue is a no-op.  Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      reset();
    } else if (b && _buf != NULL) {
      assert(index() == capacity(),
             "invariant: queues are empty when activated.");
    }
  }

  bool is_active() const { return _active; }

  // To support compiler-generated code that accesses queue fields
  // directly: byte offsets and widths of _index, _buf and _active.

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
  void* _buffer[1];             // Pseudo flexible array member.

  BufferNode() : _index(0), _next(NULL) { }
  ~BufferNode() { }

  static size_t buffer_offset() {
    return offset_of(BufferNode, _buffer);
  }
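
  // Assumed memory layout (allocate() is expected to reserve header space
  // plus "size" pointer slots, so the payload extends past the declared
  // one-element array):
  //
  //   [_index | _next | _buffer[0] | _buffer[1] | ... | _buffer[size-1]]
  //   ^node            ^node + buffer_offset()
  //                     == make_buffer_from_node(node)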

AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
  // Allocate a new BufferNode with the "buffer" having size elements.
  static BufferNode* allocate(size_t size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

public:
  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i; }

  // Return the BufferNode containing the buffer, after setting its index.
  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
    BufferNode* node =
      reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
    node->set_index(index);
    return node;
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }

  // Free-list based allocator.
  class Allocator {
    size_t _buffer_size;
    Mutex* _lock;
    BufferNode* _free_list;
    volatile size_t _free_count;

  public:
    Allocator(size_t buffer_size, Mutex* lock);
    ~Allocator();

    size_t buffer_size() const { return _buffer_size; }
    size_t free_count() const;
    BufferNode* allocate();
    void release(BufferNode* node);
    void reduce_free_list();
  };
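
  // Illustrative use of the allocator (a sketch assuming "lock" is a
  // suitable Mutex; the size and names here are examples, not values
  // taken from this file):
  //
  //   BufferNode::Allocator alloc(256 /* entries */, lock);
  //   BufferNode* node = alloc.allocate();
  //   void** buf = BufferNode::make_buffer_from_node(node);
  //   buf[0] = some_ptr;              // fill the buffer
  //   alloc.release(node);            // return it to the free list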
};

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// The shared state below is protected by _cbl_mon.
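//
// Sketch of the buffer life cycle as this header suggests it (details
// live in ptrQueue.cpp and in subclasses): a PtrQueue fills downward from
// capacity_in_bytes(); handle_zero_index() then either reuses the buffer
// (should_enqueue_buffer() returning false) or hands the full buffer to
// the set via process_or_enqueue_complete_buffer(), which may process it
// immediately or append it to the completed-buffer list, after which
// notify_if_necessary() can wake a waiting consumer.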
class PtrQueueSet {
  BufferNode::Allocator* _allocator;

protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  size_t _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself.  Zero indicates no maximum.
  int _max_completed_queue;
  size_t _completed_queue_padding;

  size_t completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(BufferNode* node) {
    ShouldNotReachHere();
    return false;
  }

  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);
  ~PtrQueueSet();

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  int process_completed_threshold,
                  int max_completed_queue);

public:

  // Return the buffer for a BufferNode of size buffer_size().
  void** allocate_buffer();

  // Return an empty buffer to the free list.  The node is required
  // to have been allocated with a size of buffer_size().
  void deallocate_buffer(BufferNode* node);

  // Declares that "node" is a complete buffer.
  void enqueue_complete_buffer(BufferNode* node);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(BufferNode* node);
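
  // A plausible shape for this method, given the comments on
  // _max_completed_queue and _completed_queue_padding (an assumption
  // about the .cpp implementation, not a quotation of it):
  //
  //   if (mutator thread && _max_completed_queue > 0 &&
  //       _n_completed_buffers >= _max_completed_queue + padding) {
  //     if (mut_process_buffer(node)) return true;  // mutator did the work
  //   }
  //   enqueue_complete_buffer(node);                // otherwise hand off
  //   return false;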

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  size_t buffer_size() const {
    return _allocator->buffer_size();
  }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  size_t completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
  size_t completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
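
  // Presumably (the definition is in ptrQueue.cpp): under _cbl_mon, if
  // _n_completed_buffers has reached _process_completed_threshold, set
  // _process_completed and, when _notify_when_complete is true, notify
  // threads waiting on _cbl_mon.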
};

#endif // SHARE_GC_SHARED_PTRQUEUE_HPP