< prev index next >

src/hotspot/share/gc/shared/ptrQueue.hpp

Print this page
rev 57095 : [mq]: use
rev 57096 : [mq]: trailing_semi


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
  26 #define SHARE_GC_SHARED_PTRQUEUE_HPP
  27 
  28 #include "memory/padded.hpp"
  29 #include "utilities/align.hpp"
  30 #include "utilities/debug.hpp"
  31 #include "utilities/lockFreeStack.hpp"

  32 #include "utilities/sizes.hpp"
  33 
  34 class Mutex;
  35 class Monitor;
  36 
  37 // There are various techniques that require threads to be able to log
  38 // addresses.  For example, a generational write barrier might log
  39 // the addresses of modified old-generation objects.  This type supports
  40 // this operation.
  41 
  42 class BufferNode;
  43 class PtrQueueSet;
  44 class PtrQueue {
       // VMStructs reads these private fields; keep layout changes in sync
       // with the serviceability agent.  NOTE(review): inferred from the
       // friendship alone -- confirm against vmStructs.cpp.
  45   friend class VMStructs;
  46 
  47   // Noncopyable - not defined.
  48   PtrQueue(const PtrQueue&);
  49   PtrQueue& operator=(const PtrQueue&);
  50 
  51   // The ptr queue set to which this queue belongs.
  52   PtrQueueSet* const _qset;
  53 
  54   // Whether updates should be logged.
  55   bool _active;
  56 
  57   // The (byte) index at which an object was last enqueued.  Starts at
  58   // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  59   // Value is always pointer-size aligned.
  60   size_t _index;
  61 
  62   // Size of the current buffer, in bytes.
  63   // Value is always pointer-size aligned.
  64   size_t _capacity_in_bytes;
  65 
       // Size in bytes of one logged entry; every element is a pointer.
  66   static const size_t _element_size = sizeof(void*);
  67 
  68   // Get the capacity, in bytes.  The capacity must have been set.
  69   size_t capacity_in_bytes() const {


 188 
       // Byte width of one buffer element (a pointer).
       // NOTE(review): these ByteSize helpers presumably feed VMStructs /
       // compiler barrier-code generation -- confirm against callers.
 189   static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 190 
       // Byte offset of the _active flag within a concrete (Derived) queue
       // type; templated so the offset is computed on the subclass itself.
 191   template<typename Derived>
 192   static ByteSize byte_offset_of_active() {
 193     return byte_offset_of(Derived, _active);
 194   }
 195 
       // Byte width of the _active flag (a bool).
 196   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 197 
 198 };
 199 
 200 class BufferNode {
 201   size_t _index;
 202   BufferNode* volatile _next;
 203   void* _buffer[1];             // Pseudo flexible array member.
 204 
       // Private: nodes are created only through allocate(), which provides
       // the storage for the trailing _buffer elements, and destroyed only
       // through deallocate().
 205   BufferNode() : _index(0), _next(NULL) { }
 206   ~BufferNode() { }
 207 


       // Byte offset of the buffer storage within a node.
 208   static size_t buffer_offset() {
 209     return offset_of(BufferNode, _buffer);
 210   }
 211 
       // Accessor for the link field, in the form LockFreeStack requires
       // for its template argument.
 212   static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
 213 
 214   // Allocate a new BufferNode with the "buffer" having size elements.
 215   static BufferNode* allocate(size_t size);
 216 
 217   // Free a BufferNode.
 218   static void deallocate(BufferNode* node);
 219 
 220 public:
       // Lock-free stack of nodes, linked through _next.
 221   typedef LockFreeStack<BufferNode, &next_ptr> Stack;
 222 
 223   BufferNode* next() const     { return _next;  }
 224   void set_next(BufferNode* n) { _next = n;     }
 225   size_t index() const         { return _index; }
 226   void set_index(size_t i)     { _index = i; }
 227 


 256   friend class TestSupport;
 257 
 258   // Since we don't expect many instances, and measured >15% speedup
 259   // on stress gtest, padding seems like a good tradeoff here.
       // Each padded member is rounded out to DEFAULT_CACHE_LINE_SIZE so the
       // hot lists/counters below do not share cache lines (false sharing).
 260 #define DECLARE_PADDED_MEMBER(Id, Type, Name) \
 261   Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_CACHE_LINE_SIZE, sizeof(Type))
 262 
 263   const size_t _buffer_size;
 264   char _name[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)]; // Use name as padding.
 265   DECLARE_PADDED_MEMBER(1, Stack, _pending_list);
 266   DECLARE_PADDED_MEMBER(2, Stack, _free_list);
 267   DECLARE_PADDED_MEMBER(3, volatile size_t, _pending_count);
 268   DECLARE_PADDED_MEMBER(4, volatile size_t, _free_count);
 269   DECLARE_PADDED_MEMBER(5, volatile bool, _transfer_lock);
 270 
 271 #undef DECLARE_PADDED_MEMBER
 272 
       // NOTE(review): names suggest released nodes collect on _pending_list
       // and try_transfer_pending() moves them to _free_list under
       // _transfer_lock -- confirm against the definitions in the .cpp file.
 273   void delete_list(BufferNode* list);
 274   bool try_transfer_pending();
 275 


 276 public:
 277   Allocator(const char* name, size_t buffer_size);
 278   ~Allocator();
 279 
 280   const char* name() const { return _name; }
 281   size_t buffer_size() const { return _buffer_size; }
 282   size_t free_count() const;
       // allocate() hands out a node sized for buffer_size() elements;
       // release(node) returns it for reuse.
 283   BufferNode* allocate();
 284   void release(BufferNode* node);
 285 
 286   // Deallocate some of the available buffers.  remove_goal is the target
 287   // number to remove.  Returns the number actually deallocated, which may
 288   // be less than the goal if there were fewer available.
 289   size_t reduce_free_list(size_t remove_goal);
 290 };
 291 
 292 // A PtrQueueSet represents resources common to a set of pointer queues.
 293 // In particular, the individual queues allocate buffers from this shared
 294 // set, and return completed buffers to the set.
 295 class PtrQueueSet {
 296   BufferNode::Allocator* _allocator;
 297 
 298   // Noncopyable - not defined.
 299   PtrQueueSet(const PtrQueueSet&);
 300   PtrQueueSet& operator=(const PtrQueueSet&);
 301 
 302 protected:
       // Whether queues belonging to this set log by default.
       // NOTE(review): inferred from the name and PtrQueue::_active --
       // confirm against the subclass implementations.
 303   bool _all_active;
 304 
 305   // Create an empty ptr queue set.
       // Protected: a PtrQueueSet is only instantiated via a concrete
       // subclass; the destructor is likewise non-virtual and protected.
 306   PtrQueueSet(BufferNode::Allocator* allocator);
 307   ~PtrQueueSet();
 308 
 309 public:
 310 
 311   // Return the associated BufferNode allocator.
 312   BufferNode::Allocator* allocator() const { return _allocator; }
 313 
 314   // Return the buffer for a BufferNode of size buffer_size().
 315   void** allocate_buffer();
 316 
 317   // Return an empty buffer to the free list.  The node is required
 318   // to have been allocated with a size of buffer_size().
 319   void deallocate_buffer(BufferNode* node);
 320 


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
  26 #define SHARE_GC_SHARED_PTRQUEUE_HPP
  27 
  28 #include "memory/padded.hpp"
  29 #include "utilities/align.hpp"
  30 #include "utilities/debug.hpp"
  31 #include "utilities/lockFreeStack.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "utilities/sizes.hpp"
  34 
  35 class Mutex;
  36 class Monitor;
  37 
  38 // There are various techniques that require threads to be able to log
  39 // addresses.  For example, a generational write barrier might log
  40 // the addresses of modified old-generation objects.  This type supports
  41 // this operation.
  42 
  43 class BufferNode;
  44 class PtrQueueSet;
  45 class PtrQueue {
       // VMStructs reads these private fields; keep layout changes in sync
       // with the serviceability agent.  NOTE(review): inferred from the
       // friendship alone -- confirm against vmStructs.cpp.
  46   friend class VMStructs;
  47 
       // NONCOPYABLE (from utilities/macros.hpp) suppresses copy
       // construction and copy assignment.
  48   NONCOPYABLE(PtrQueue);


  49 
  50   // The ptr queue set to which this queue belongs.
  51   PtrQueueSet* const _qset;
  52 
  53   // Whether updates should be logged.
  54   bool _active;
  55 
  56   // The (byte) index at which an object was last enqueued.  Starts at
  57   // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  58   // Value is always pointer-size aligned.
  59   size_t _index;
  60 
  61   // Size of the current buffer, in bytes.
  62   // Value is always pointer-size aligned.
  63   size_t _capacity_in_bytes;
  64 
       // Size in bytes of one logged entry; every element is a pointer.
  65   static const size_t _element_size = sizeof(void*);
  66 
  67   // Get the capacity, in bytes.  The capacity must have been set.
  68   size_t capacity_in_bytes() const {


 187 
       // Byte width of one buffer element (a pointer).
       // NOTE(review): these ByteSize helpers presumably feed VMStructs /
       // compiler barrier-code generation -- confirm against callers.
 188   static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 189 
       // Byte offset of the _active flag within a concrete (Derived) queue
       // type; templated so the offset is computed on the subclass itself.
 190   template<typename Derived>
 191   static ByteSize byte_offset_of_active() {
 192     return byte_offset_of(Derived, _active);
 193   }
 194 
       // Byte width of the _active flag (a bool).
 195   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 196 
 197 };
 198 
 199 class BufferNode {
 200   size_t _index;
 201   BufferNode* volatile _next;
 202   void* _buffer[1];             // Pseudo flexible array member.
 203 
       // Private: nodes are created only through allocate(), which provides
       // the storage for the trailing _buffer elements, and destroyed only
       // through deallocate().
 204   BufferNode() : _index(0), _next(NULL) { }
 205   ~BufferNode() { }
 206 
       // NONCOPYABLE (from utilities/macros.hpp) suppresses copy
       // construction and copy assignment.
 207   NONCOPYABLE(BufferNode);
 208 
       // Byte offset of the buffer storage within a node.
 209   static size_t buffer_offset() {
 210     return offset_of(BufferNode, _buffer);
 211   }
 212 
       // Accessor for the link field, in the form LockFreeStack requires
       // for its template argument.
 213   static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
 214 
 215   // Allocate a new BufferNode with the "buffer" having size elements.
 216   static BufferNode* allocate(size_t size);
 217 
 218   // Free a BufferNode.
 219   static void deallocate(BufferNode* node);
 220 
 221 public:
       // Lock-free stack of nodes, linked through _next.
 222   typedef LockFreeStack<BufferNode, &next_ptr> Stack;
 223 
 224   BufferNode* next() const     { return _next;  }
 225   void set_next(BufferNode* n) { _next = n;     }
 226   size_t index() const         { return _index; }
 227   void set_index(size_t i)     { _index = i; }
 228 


 257   friend class TestSupport;
 258 
 259   // Since we don't expect many instances, and measured >15% speedup
 260   // on stress gtest, padding seems like a good tradeoff here.
       // Each padded member is rounded out to DEFAULT_CACHE_LINE_SIZE so the
       // hot lists/counters below do not share cache lines (false sharing).
 261 #define DECLARE_PADDED_MEMBER(Id, Type, Name) \
 262   Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_CACHE_LINE_SIZE, sizeof(Type))
 263 
 264   const size_t _buffer_size;
 265   char _name[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)]; // Use name as padding.
 266   DECLARE_PADDED_MEMBER(1, Stack, _pending_list);
 267   DECLARE_PADDED_MEMBER(2, Stack, _free_list);
 268   DECLARE_PADDED_MEMBER(3, volatile size_t, _pending_count);
 269   DECLARE_PADDED_MEMBER(4, volatile size_t, _free_count);
 270   DECLARE_PADDED_MEMBER(5, volatile bool, _transfer_lock);
 271 
 272 #undef DECLARE_PADDED_MEMBER
 273 
       // NOTE(review): names suggest released nodes collect on _pending_list
       // and try_transfer_pending() moves them to _free_list under
       // _transfer_lock -- confirm against the definitions in the .cpp file.
 274   void delete_list(BufferNode* list);
 275   bool try_transfer_pending();
 276 
       // NONCOPYABLE (from utilities/macros.hpp) suppresses copy
       // construction and copy assignment.
 277   NONCOPYABLE(Allocator);
 278 
 279 public:
 280   Allocator(const char* name, size_t buffer_size);
 281   ~Allocator();
 282 
 283   const char* name() const { return _name; }
 284   size_t buffer_size() const { return _buffer_size; }
 285   size_t free_count() const;
       // allocate() hands out a node sized for buffer_size() elements;
       // release(node) returns it for reuse.
 286   BufferNode* allocate();
 287   void release(BufferNode* node);
 288 
 289   // Deallocate some of the available buffers.  remove_goal is the target
 290   // number to remove.  Returns the number actually deallocated, which may
 291   // be less than the goal if there were fewer available.
 292   size_t reduce_free_list(size_t remove_goal);
 293 };
 294 
 295 // A PtrQueueSet represents resources common to a set of pointer queues.
 296 // In particular, the individual queues allocate buffers from this shared
 297 // set, and return completed buffers to the set.
 298 class PtrQueueSet {
 299   BufferNode::Allocator* _allocator;
 300 
       // NONCOPYABLE (from utilities/macros.hpp) suppresses copy
       // construction and copy assignment.
 301   NONCOPYABLE(PtrQueueSet);


 302 
 303 protected:
       // Whether queues belonging to this set log by default.
       // NOTE(review): inferred from the name and PtrQueue::_active --
       // confirm against the subclass implementations.
 304   bool _all_active;
 305 
 306   // Create an empty ptr queue set.
       // Protected: a PtrQueueSet is only instantiated via a concrete
       // subclass; the destructor is likewise non-virtual and protected.
 307   PtrQueueSet(BufferNode::Allocator* allocator);
 308   ~PtrQueueSet();
 309 
 310 public:
 311 
 312   // Return the associated BufferNode allocator.
 313   BufferNode::Allocator* allocator() const { return _allocator; }
 314 
 315   // Return the buffer for a BufferNode of size buffer_size().
 316   void** allocate_buffer();
 317 
 318   // Return an empty buffer to the free list.  The node is required
 319   // to have been allocated with a size of buffer_size().
 320   void deallocate_buffer(BufferNode* node);
 321 
< prev index next >