/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
#define SHARE_GC_SHARED_PTRQUEUE_HPP

#include "utilities/align.hpp"
#include "utilities/sizes.hpp"

class Mutex;

// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.
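//
// For example (an illustrative sketch only; the thread accessor shown is
// hypothetical, not part of this file's API), a post-write barrier for
// "obj->field = value" might record the modified location like this:
//
//   void post_write_barrier(void** field) {
//     PtrQueue* q = current_thread_log_queue();  // hypothetical accessor
//     q->enqueue(field);  // no-op if the queue is inactive
//   }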

class BufferNode;
class PtrQueueSet;
class PtrQueue {
  friend class VMStructs;

  // Noncopyable - not defined.
  PtrQueue(const PtrQueue&);
  PtrQueue& operator=(const PtrQueue&);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  const bool _permanent;

  // The (byte) index at which an object was last enqueued.  Starts at
  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  // Value is always pointer-size aligned.
  size_t _index;
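  // For example, with 64-bit pointers and a 256-entry buffer,
  // capacity_in_bytes() is 2048: a freshly reset queue has _index == 2048,
  // the first enqueue stores at byte index 2040, and _index == 0 means the
  // buffer is full.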

  // Size of the current buffer, in bytes.
  // Value is always pointer-size aligned.
  size_t _capacity_in_bytes;

  static const size_t _element_size = sizeof(void*);

  // Get the capacity, in bytes.  The capacity must have been set.
  size_t capacity_in_bytes() const {
    assert(_capacity_in_bytes > 0, "capacity not set");
    return _capacity_in_bytes;
  }

  void set_capacity(size_t entries) {
    size_t byte_capacity = index_to_byte_index(entries);
    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
           _capacity_in_bytes, byte_capacity);
    _capacity_in_bytes = byte_capacity;
  }

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }

protected:
  // The buffer.
  void** _buf;

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= capacity_in_bytes(), "precondition");
    _index = byte_index;
  }

  size_t capacity() const {
    return byte_index_to_index(capacity_in_bytes());
  }

  // If there is a lock associated with this queue, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }
  bool is_permanent() const { return _permanent; }

  // Process queue entries and release resources.
  void flush_impl();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);

  // Requires queue flushed or permanent.
  ~PtrQueue();

public:

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Forcibly set empty.
  void reset() {
    if (_buf != NULL) {
      _index = capacity_in_bytes();
    }
  }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr", if the queue is active.
  void enqueue(void* ptr) {
    if (_active) {
      enqueue_known_active(ptr);
    }
  }

  // This method is called during zero-index handling, and gives the queue
  // a chance to do any pre-enqueueing processing it might want to do on
  // the buffer.  It should return true if the buffer should be enqueued,
  // or false if enough entries were cleared from it that it can be
  // re-used.  It must not return false if the buffer is still full
  // (otherwise we can get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
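  //
  // An illustrative override (a sketch only; FilteringPtrQueue and its
  // helpers are hypothetical, not part of this file):
  //
  //   bool FilteringPtrQueue::should_enqueue_buffer() {
  //     filter_out_dead_entries();  // hypothetical: compacts surviving
  //                                 // entries and updates the index
  //     // Enqueue unless filtering freed enough space to keep using the
  //     // buffer.  A still-full buffer has index() == 0, so this never
  //     // returns false for a full buffer.
  //     return index() < retain_threshold();  // hypothetical threshold
  //   }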
  void handle_zero_index();
  void locking_enqueue_completed_buffer(BufferNode* node);

  void enqueue_known_active(void* ptr);

  // Return the size of the in-use region.
  size_t size() const {
    size_t result = 0;
    if (_buf != NULL) {
      assert(_index <= capacity_in_bytes(), "Invariant");
      result = byte_index_to_index(capacity_in_bytes() - _index);
    }
    return result;
  }

  bool is_empty() const {
    return _buf == NULL || capacity_in_bytes() == _index;
  }

  // Set the "active" property of the queue to "b".  An enqueue to an
  // inactive queue is a no-op.  Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      reset();
    } else if (b && _buf != NULL) {
      assert(index() == capacity(),
             "invariant: queues are empty when activated.");
    }
  }

  bool is_active() const { return _active; }

  // Support for compiler-generated barrier code: byte offsets and widths
  // of the fields that such code reads and writes.

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
  void* _buffer[1];             // Pseudo flexible array member.

  BufferNode() : _index(0), _next(NULL) { }
  ~BufferNode() { }

  static size_t buffer_offset() {
    return offset_of(BufferNode, _buffer);
  }

AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
  // Allocate a new BufferNode with the "buffer" having size elements.
  static BufferNode* allocate(size_t size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

public:
  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i; }

  // Return the BufferNode containing the buffer, after setting its index.
  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
    BufferNode* node =
      reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
    node->set_index(index);
    return node;
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }
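  // Memory layout assumed by the two conversions above: the node header is
  // immediately followed by the buffer storage, so converting between the
  // two is a fixed offset in either direction.
  //
  //   +--------+-------+----------------------------------+
  //   | _index | _next | _buffer[0] ... _buffer[size-1]   |
  //   +--------+-------+----------------------------------+
  //   ^ BufferNode*      ^ void** handed to clients
  //
  // make_buffer_from_node() steps forward by buffer_offset();
  // make_node_from_buffer() steps back by the same amount.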

  // Free-list based allocator.
  class Allocator {
    size_t _buffer_size;
    Mutex* _lock;
    BufferNode* _free_list;
    volatile size_t _free_count;

  public:
    Allocator(size_t buffer_size, Mutex* lock);
    ~Allocator();

    size_t buffer_size() const { return _buffer_size; }
    size_t free_count() const;
    BufferNode* allocate();
    void release(BufferNode* node);
    void reduce_free_list();
  };
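  // A typical Allocator lifecycle (an illustrative sketch; the mutex shown
  // is hypothetical):
  //
  //   BufferNode::Allocator alloc(1024, some_mutex);
  //   BufferNode* node = alloc.allocate();  // reuses a free-list node if
  //                                         // one is available
  //   void** buf = BufferNode::make_buffer_from_node(node);
  //   // ... fill buf, hand the node off for processing ...
  //   alloc.release(node);                  // return it to the free list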
};

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// The completed-buffer state below is protected by _cbl_mon.
class PtrQueueSet {
  BufferNode::Allocator* _allocator;

protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  size_t _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself.  Zero indicates no maximum.
  int _max_completed_queue;
  size_t _completed_queue_padding;

  size_t completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(BufferNode* node) {
    ShouldNotReachHere();
    return false;
  }

  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);
  ~PtrQueueSet();

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  int process_completed_threshold,
                  int max_completed_queue);
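  //
  // A typical setup sequence (an illustrative sketch; the monitor, mutex,
  // and subclass shown are hypothetical):
  //
  //   static BufferNode::Allocator buffer_allocator(256, &Buffer_lock);
  //   MyQueueSet qset;                 // some PtrQueueSet subclass
  //   qset.initialize(&CBL_mon,        // monitor guarding the completed list
  //                   &buffer_allocator,
  //                   5,               // process_completed_threshold
  //                   0);              // max_completed_queue: no maximum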

public:

  // Return the buffer for a BufferNode of size buffer_size().
  void** allocate_buffer();

  // Return an empty buffer to the free list.  The node is required
  // to have been allocated with a size of buffer_size().
  void deallocate_buffer(BufferNode* node);

  // Declares that "node" holds a complete buffer.
  void enqueue_complete_buffer(BufferNode* node);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(BufferNode* node);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  size_t buffer_size() const {
    return _allocator->buffer_size();
  }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  size_t completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
  size_t completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};

#endif // SHARE_GC_SHARED_PTRQUEUE_HPP