src/hotspot/share/gc/shared/ptrQueue.hpp

rev 53140 : [mq]: new_allocator
rev 53141 : imported patch stack_access
   1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
  26 #define SHARE_GC_SHARED_PTRQUEUE_HPP
   27 
   28 #include "utilities/align.hpp"
  29 #include "utilities/sizes.hpp"
  30 
  31 class Mutex;
  32 
  33 // There are various techniques that require threads to be able to log
  34 // addresses.  For example, a generational write barrier might log
  35 // the addresses of modified old-generation objects.  This type supports
  36 // this operation.
  37 
  38 class BufferNode;
  39 class PtrQueueSet;
  40 class PtrQueue {
  41   friend class VMStructs;
  42 
  43   // Noncopyable - not defined.
  44   PtrQueue(const PtrQueue&);
  45   PtrQueue& operator=(const PtrQueue&);
  46 
  47   // The ptr queue set to which this queue belongs.
   48   PtrQueueSet* const _qset;

 197   static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
 198 
 199   template<typename Derived>
 200   static ByteSize byte_offset_of_buf() {
 201     return byte_offset_of(Derived, _buf);
 202   }
 203 
 204   static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 205 
 206   template<typename Derived>
 207   static ByteSize byte_offset_of_active() {
 208     return byte_offset_of(Derived, _active);
 209   }
 210 
 211   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 212 
 213 };
 214 
 215 class BufferNode {
 216   size_t _index;
 217   BufferNode* _next;
 218   void* _buffer[1];             // Pseudo flexible array member.
 219 
 220   BufferNode() : _index(0), _next(NULL) { }
 221   ~BufferNode() { }
 222 
 223   static size_t buffer_offset() {
 224     return offset_of(BufferNode, _buffer);
 225   }
  226 
 227 AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
 228   // Allocate a new BufferNode with the "buffer" having size elements.
 229   static BufferNode* allocate(size_t size);
 230 
 231   // Free a BufferNode.
 232   static void deallocate(BufferNode* node);
 233 
  234 public:
 235   BufferNode* next() const     { return _next;  }
 236   void set_next(BufferNode* n) { _next = n;     }
 237   size_t index() const         { return _index; }
 238   void set_index(size_t i)     { _index = i; }
 239 
 240   // Return the BufferNode containing the buffer, after setting its index.
 241   static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
 242     BufferNode* node =
 243       reinterpret_cast<BufferNode*>(
 244         reinterpret_cast<char*>(buffer) - buffer_offset());
 245     node->set_index(index);
 246     return node;
 247   }
 248 
 249   // Return the buffer for node.
 250   static void** make_buffer_from_node(BufferNode *node) {
 251     // &_buffer[0] might lead to index out of bounds warnings.
 252     return reinterpret_cast<void**>(
 253       reinterpret_cast<char*>(node) + buffer_offset());
 254   }
 255 
 256   // Free-list based allocator.
 257   class Allocator {
 258     size_t _buffer_size;
 259     Mutex* _lock;
 260     BufferNode* _free_list;
  261     volatile size_t _free_count;
 262 
 263   public:
  264     Allocator(size_t buffer_size, Mutex* lock);
 265     ~Allocator();
 266 

 267     size_t buffer_size() const { return _buffer_size; }
 268     size_t free_count() const;
 269     BufferNode* allocate();
 270     void release(BufferNode* node);
 271     void reduce_free_list();
  272   };
 273 };
 274 
 275 // A PtrQueueSet represents resources common to a set of pointer queues.
 276 // In particular, the individual queues allocate buffers from this shared
 277 // set, and return completed buffers to the set.
 278 class PtrQueueSet {
 279   BufferNode::Allocator* _allocator;
 280 
 281   Monitor* _cbl_mon;  // Protects the fields below.
 282   BufferNode* _completed_buffers_head;
 283   BufferNode* _completed_buffers_tail;
 284   size_t _n_completed_buffers;
 285 
 286   size_t _process_completed_buffers_threshold;
 287   volatile bool _process_completed_buffers;
 288 
 289   // If true, notify_all on _cbl_mon when the threshold is reached.
 290   bool _notify_when_complete;
 291 
 292   // Maximum number of elements allowed on completed queue: after that,


   1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
  26 #define SHARE_GC_SHARED_PTRQUEUE_HPP
  27 
  28 #include "memory/padded.hpp"
  29 #include "utilities/align.hpp"
  30 #include "utilities/debug.hpp"
  31 #include "utilities/lockFreeStack.hpp"
  32 #include "utilities/sizes.hpp"
  33 
  34 class Mutex;
  35 
  36 // There are various techniques that require threads to be able to log
  37 // addresses.  For example, a generational write barrier might log
  38 // the addresses of modified old-generation objects.  This type supports
  39 // this operation.
  40 
  41 class BufferNode;
  42 class PtrQueueSet;
  43 class PtrQueue {
  44   friend class VMStructs;
  45 
  46   // Noncopyable - not defined.
  47   PtrQueue(const PtrQueue&);
  48   PtrQueue& operator=(const PtrQueue&);
  49 
  50   // The ptr queue set to which this queue belongs.
   51   PtrQueueSet* const _qset;

 200   static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
 201 
 202   template<typename Derived>
 203   static ByteSize byte_offset_of_buf() {
 204     return byte_offset_of(Derived, _buf);
 205   }
 206 
 207   static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 208 
 209   template<typename Derived>
 210   static ByteSize byte_offset_of_active() {
 211     return byte_offset_of(Derived, _active);
 212   }
 213 
 214   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
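  // Illustration (not part of the patch): these offset/width helpers exist
  // so that generated code, such as a JIT-compiled write barrier, can
  // address a concrete queue's fields directly.  For some PtrQueue subclass
  // T, a backend could compute
  //   base_address + in_bytes(T::byte_offset_of_active())
  // and test a byte_width_of_active()-sized value there to decide whether
  // logging is currently enabled for that queue.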
 215 
 216 };
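To make the logging comment above concrete: a client such as a generational
post-write barrier pushes addresses into its thread-local queue.  A minimal
sketch follows; ExampleQueue and its two members are hypothetical stand-ins,
since PtrQueue's real mutator interface sits in the elided middle of this file.

    // Hypothetical stand-in for a PtrQueue subclass; illustrative only.
    struct ExampleQueue {
      bool _active;                      // mirrors PtrQueue's _active flag
      bool is_active() const { return _active; }
      void enqueue(void* ptr);           // append to the current buffer; when
                                         // full, hand it to the owning qset
    };

    // A generational post-write barrier might log like this after a store
    // that modified an old-generation object:
    inline void example_post_write_barrier(ExampleQueue& q, void* old_obj) {
      if (q.is_active()) {               // logging can be switched off
        q.enqueue(old_obj);
      }
    }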
 217 
 218 class BufferNode {
 219   size_t _index;
 220   BufferNode* volatile _next;
 221   void* _buffer[1];             // Pseudo flexible array member.
 222 
 223   BufferNode() : _index(0), _next(NULL) { }
 224   ~BufferNode() { }
 225 
 226   static size_t buffer_offset() {
 227     return offset_of(BufferNode, _buffer);
 228   }
 229 
 230   static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
 231 
 232 AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
 233   // Allocate a new BufferNode with the "buffer" having size elements.
 234   static BufferNode* allocate(size_t size);
 235 
 236   // Free a BufferNode.
 237   static void deallocate(BufferNode* node);
 238 
 239 public:
 240   typedef LockFreeStack<BufferNode, &next_ptr> Stack;
 241 
 242   BufferNode* next() const     { return _next;  }
 243   void set_next(BufferNode* n) { _next = n;     }
 244   size_t index() const         { return _index; }
 245   void set_index(size_t i)     { _index = i; }
 246 
 247   // Return the BufferNode containing the buffer, after setting its index.
 248   static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
 249     BufferNode* node =
 250       reinterpret_cast<BufferNode*>(
 251         reinterpret_cast<char*>(buffer) - buffer_offset());
 252     node->set_index(index);
 253     return node;
 254   }
 255 
 256   // Return the buffer for node.
 257   static void** make_buffer_from_node(BufferNode *node) {
 258     // &_buffer[0] might lead to index out of bounds warnings.
 259     return reinterpret_cast<void**>(
 260       reinterpret_cast<char*>(node) + buffer_offset());
 261   }
 262 
 263   class Allocator;              // Free-list based allocator.
 264   class TestSupport;            // Unit test support.
 265 };
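The two conversion helpers rely on the pseudo flexible array member: a node's
buffer storage lives at a fixed offset inside its BufferNode, so either
pointer can be recovered from the other by plain pointer arithmetic.  A sketch
of the round trip, assuming an Allocator (declared above, defined below)
supplies the node:

    // Illustrative only. 'alloc' hands out nodes whose buffers hold
    // alloc.buffer_size() element slots.
    void example_round_trip(BufferNode::Allocator& alloc) {
      BufferNode* node = alloc.allocate();
      void** buf = BufferNode::make_buffer_from_node(node);

      // ... a queue fills slots of buf and tracks its fill level ...

      // Recover the node from the bare buffer pointer, recording the final
      // index, as a queue does when handing a completed buffer onward.
      BufferNode* same = BufferNode::make_node_from_buffer(buf, 0 /* index */);
      // same == node here; the two casts invert each other.
      alloc.release(same);
    }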
 266 
 267 // Allocation is based on a lock-free free list of nodes, linked through
 268 // BufferNode::_next (see BufferNode::Stack).  To solve the ABA problem,
 269 // popping a node from the free list is performed within a GlobalCounter
 270 // critical section, and pushing nodes onto the free list is done after
 271 // a GlobalCounter synchronization associated with the nodes to be pushed.
 272 // This is documented behavior so that other parts of the node life-cycle
 273 // can depend on and make use of it too.
 274 class BufferNode::Allocator {
 275   friend class TestSupport;
 276 
  277   // Since we don't expect many instances, and we measured a >15% speedup
  278   // on a stress gtest, padding seems like a good tradeoff here.
 279 #define DECLARE_PADDED_MEMBER(Id, Type, Name) \
 280   Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_CACHE_LINE_SIZE, sizeof(Type))
 281 
 282   const size_t _buffer_size;
 283   char _name[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)]; // Use name as padding.
 284   DECLARE_PADDED_MEMBER(1, Stack, _pending_list);
 285   DECLARE_PADDED_MEMBER(2, Stack, _free_list);
 286   DECLARE_PADDED_MEMBER(3, volatile size_t, _pending_count);
 287   DECLARE_PADDED_MEMBER(4, volatile size_t, _free_count);
 288   DECLARE_PADDED_MEMBER(5, volatile bool, _transfer_lock);
 289 
 290 #undef DECLARE_PADDED_MEMBER
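  // Illustration (not part of the patch): with padded.hpp's
  // DEFINE_PAD_MINUS_SIZE, a declaration such as
  //   DECLARE_PADDED_MEMBER(4, volatile size_t, _free_count);
  // expands to roughly
  //   volatile size_t _free_count;
  //   char _pad_buf4[DEFAULT_CACHE_LINE_SIZE - sizeof(volatile size_t)];
  // so each hot member occupies its own cache line and concurrent updates
  // to the two stacks and the counters do not false-share.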
 291 
 292   void delete_list(BufferNode* list);
 293   bool try_transfer_pending();
 294 
 295 public:
 296   Allocator(const char* name, size_t buffer_size);
 297   ~Allocator();
 298 
 299   const char* name() const { return _name; }
 300   size_t buffer_size() const { return _buffer_size; }
 301   size_t free_count() const;
 302   BufferNode* allocate();
 303   void release(BufferNode* node);
 304 
 305   // Deallocate some of the available buffers.  remove_goal is the target
 306   // number to remove.  Returns the number actually deallocated, which may
 307   // be less than the goal if there were fewer available.
 308   size_t reduce_free_list(size_t remove_goal);
 309 };
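The class comment above pins down the ABA defense: pops happen inside a
GlobalCounter critical section, and nodes are pushed back only after a
synchronization covering them.  A minimal sketch of that shape follows; the
example_* functions are hypothetical stand-ins for the GlobalCounter
operations, and pop/push are assumed from LockFreeStack.

    // Sketch of the documented node life-cycle, not the real implementation.
    void example_critical_section_begin();   // reader enters critical section
    void example_critical_section_end();     // reader leaves critical section
    void example_write_synchronize();        // waits for all active readers

    BufferNode* example_allocate(BufferNode::Stack& free_list) {
      example_critical_section_begin();
      // Inside the critical section a popped node cannot yet have been
      // recycled and re-pushed, so pop()'s CAS cannot be fooled by ABA.
      BufferNode* node = free_list.pop();
      example_critical_section_end();
      return node;
    }

    void example_recycle(BufferNode::Stack& free_list, BufferNode& node) {
      // Republish only after every thread that might still hold a stale
      // pointer to this node has left its critical section.
      example_write_synchronize();
      free_list.push(node);
    }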
 310 
 311 // A PtrQueueSet represents resources common to a set of pointer queues.
 312 // In particular, the individual queues allocate buffers from this shared
 313 // set, and return completed buffers to the set.
 314 class PtrQueueSet {
 315   BufferNode::Allocator* _allocator;
 316 
 317   Monitor* _cbl_mon;  // Protects the fields below.
 318   BufferNode* _completed_buffers_head;
 319   BufferNode* _completed_buffers_tail;
 320   size_t _n_completed_buffers;
 321 
 322   size_t _process_completed_buffers_threshold;
 323   volatile bool _process_completed_buffers;
 324 
 325   // If true, notify_all on _cbl_mon when the threshold is reached.
 326   bool _notify_when_complete;
 327 
  328   // Maximum number of elements allowed on completed queue: after that,