
src/share/vm/gc/g1/ptrQueue.hpp

rev 9215 : imported patch remove_dead_code
rev 9216 : imported patch rename_perm
rev 9219 : [mq]: access
rev 9220 : [mq]: noncopyable
rev 9221 : [mq]: simplify_loops


  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
  26 #define SHARE_VM_GC_G1_PTRQUEUE_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "utilities/sizes.hpp"
  30 
  31 // There are various techniques that require threads to be able to log
  32 // addresses.  For example, a generational write barrier might log
  33 // the addresses of modified old-generation objects.  This type supports
  34 // this operation.
  35 
  36 // The definition of placement operator new(size_t, void*) is in <new>.
  37 #include <new>
  38 
  39 class PtrQueueSet;
  40 class PtrQueue VALUE_OBJ_CLASS_SPEC {
  41   friend class VMStructs;
  42 
  43 protected:



  44   // The ptr queue set to which this queue belongs.
  45   PtrQueueSet* _qset;
  46 
  47   // Whether updates should be logged.
  48   bool _active;
  49 






  50   // The buffer.
  51   void** _buf;
  52   // The index at which an object was last enqueued.  Starts at "_sz"
  53   // (indicating an empty buffer) and goes towards zero.
  54   size_t _index;
  55 
  56   // The size of the buffer.
  57   size_t _sz;
  58 
  59   // If true, the queue is permanent, and doesn't need to deallocate
  60   // its buffer in the destructor (since that obtains a lock which may not
  61   // be legally locked by then).
  62   bool _perm;
  63 
  64   // If there is a lock associated with this buffer, this is that lock.
  65   Mutex* _lock;
  66 
  67   PtrQueueSet* qset() { return _qset; }
  68   bool is_permanent() const { return _perm; }
  69 
  70   // Process queue entries and release resources, if not permanent.
  71   void flush_impl();
  72 
  73 public:
  74   // Initialize this queue to contain a null buffer, and be part of the
  75   // given PtrQueueSet.
  76   PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  77 
  78   // Requires queue flushed or permanent.
  79   ~PtrQueue();
  80 


  81   // Associate a lock with a ptr queue.
  82   void set_lock(Mutex* lock) { _lock = lock; }
  83 
  84   void reset() { if (_buf != NULL) _index = _sz; }
  85 
  86   void enqueue(volatile void* ptr) {
  87     enqueue((void*)(ptr));
  88   }
  89 
  90   // Enqueues the given "obj".
  91   void enqueue(void* ptr) {
  92     if (!_active) return;
  93     else enqueue_known_active(ptr);
  94   }
  95 
  96   // This method is called when we're doing the zero index handling
  97   // and gives a chance to the queues to do any pre-enqueueing
  98   // processing they might want to do on the buffer. It should return
  99   // true if the buffer should be enqueued, or false if enough
 100   // entries were cleared from it so that it can be re-used. It should


 112   }
 113 
 114   bool is_empty() {
 115     return _buf == NULL || _sz == _index;
 116   }
 117 
 118   // Set the "active" property of the queue to "b".  An enqueue to an
 119   // inactive thread is a no-op.  Setting a queue to inactive resets its
 120   // log to the empty state.
 121   void set_active(bool b) {
 122     _active = b;
 123     if (!b && _buf != NULL) {
 124       _index = _sz;
 125     } else if (b && _buf != NULL) {
 126       assert(_index == _sz, "invariant: queues are empty when activated.");
 127     }
 128   }
 129 
 130   bool is_active() { return _active; }
 131 
 132   static int byte_index_to_index(int ind) {
 133     assert((ind % oopSize) == 0, "Invariant.");
 134     return ind / oopSize;
 135   }
 136 
 137   static int index_to_byte_index(int byte_ind) {
 138     return byte_ind * oopSize;
 139   }
 140 
 141   // To support compiler.
 142   static ByteSize byte_offset_of_index() {
 143     return byte_offset_of(PtrQueue, _index);
 144   }
 145   static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
 146 
 147   static ByteSize byte_offset_of_buf() {
 148     return byte_offset_of(PtrQueue, _buf);
 149   }
 150   static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
 151 
 152   static ByteSize byte_offset_of_active() {
 153     return byte_offset_of(PtrQueue, _active);
 154   }
 155   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 156 
 157 };
 158 
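Editor's note: the fields and accessors above describe a buffer whose _index is a byte offset that starts at _sz (empty) and is decremented toward zero as pointers are logged, with index zero meaning the buffer is full and must be handed off to the queue set. The following minimal, self-contained sketch is not HotSpot code; DemoPtrQueue and try_enqueue are invented names used only to illustrate that decrementing-index scheme, e.g. as a write barrier's log.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical illustration of the _buf/_index/_sz scheme described above.
// _index is a byte offset: it starts at _sz (empty) and is decremented by
// sizeof(void*) on each enqueue; _index == 0 means the buffer is full.
class DemoPtrQueue {
  void** _buf;
  size_t _index;   // byte index of the most recently filled slot
  size_t _sz;      // byte size of the buffer

public:
  explicit DemoPtrQueue(size_t byte_size)
    : _buf(static_cast<void**>(malloc(byte_size))),
      _index(byte_size),
      _sz(byte_size) {}

  ~DemoPtrQueue() { free(_buf); }

  bool is_empty() const { return _index == _sz; }
  bool is_full()  const { return _index == 0; }

  // Returns false when the buffer is full; a real queue would then hand the
  // buffer to its queue set (the "zero index handling" mentioned above).
  bool try_enqueue(void* ptr) {
    if (is_full()) return false;
    _index -= sizeof(void*);
    _buf[_index / sizeof(void*)] = ptr;
    return true;
  }
};

int main() {
  DemoPtrQueue q(4 * sizeof(void*));   // room for four logged pointers
  int dummy;
  while (q.try_enqueue(&dummy)) { }    // fill the log, as a write barrier might
  printf("full=%d empty=%d\n", q.is_full(), q.is_empty());
  return 0;
}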


 229   bool _notify_when_complete;
 230 
 231   // Maximum number of elements allowed on completed queue: after that,
 232   // enqueuer does the work itself.  Zero indicates no maximum.
 233   int _max_completed_queue;
 234   int _completed_queue_padding;
 235 
 236   int completed_buffers_list_length();
 237   void assert_completed_buffer_list_len_correct_locked();
 238   void assert_completed_buffer_list_len_correct();
 239 
 240 protected:
 241   // A mutator thread does the work of processing a buffer.
 242   // Returns "true" iff the work is complete (and the buffer may be
 243   // deallocated).
 244   virtual bool mut_process_buffer(void** buf) {
 245     ShouldNotReachHere();
 246     return false;
 247   }
 248 
 249 public:
 250   // Create an empty ptr queue set.
 251   PtrQueueSet(bool notify_when_complete = false);

 252 
 253   // Because of init-order concerns, we can't pass these as constructor
 254   // arguments.
 255   void initialize(Monitor* cbl_mon, Mutex* fl_lock,

 256                   int process_completed_threshold,
 257                   int max_completed_queue,
 258                   PtrQueueSet *fl_owner = NULL) {
 259     _max_completed_queue = max_completed_queue;
 260     _process_completed_threshold = process_completed_threshold;
 261     _completed_queue_padding = 0;
 262     assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
 263     _cbl_mon = cbl_mon;
 264     _fl_lock = fl_lock;
 265     _fl_owner = (fl_owner != NULL) ? fl_owner : this;
 266   }
 267 
 268   // Return an empty oop array of size _sz (required to be non-zero).
 269   void** allocate_buffer();
 270 
 271   // Return an empty buffer to the free list.  The "buf" argument is
 272   // required to be a pointer to the head of an array of length "_sz".
 273   void deallocate_buffer(void** buf);
 274 
 275   // Declares that "buf" is a complete buffer.
 276   void enqueue_complete_buffer(void** buf, size_t index = 0);
 277 
 278   // To be invoked by the mutator.
 279   bool process_or_enqueue_complete_buffer(void** buf);
 280 
 281   bool completed_buffers_exist_dirty() {
 282     return _n_completed_buffers > 0;
 283   }
 284 
 285   bool process_completed_buffers() { return _process_completed; }
 286   void set_process_completed(bool x) { _process_completed = x; }
 287 
 288   bool is_active() { return _all_active; }




  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
  26 #define SHARE_VM_GC_G1_PTRQUEUE_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "utilities/sizes.hpp"
  30 
  31 // There are various techniques that require threads to be able to log
  32 // addresses.  For example, a generational write barrier might log
  33 // the addresses of modified old-generation objects.  This type supports
  34 // this operation.
  35 
  36 // The definition of placement operator new(size_t, void*) is in <new>.
  37 #include <new>
  38 
  39 class PtrQueueSet;
  40 class PtrQueue VALUE_OBJ_CLASS_SPEC {
  41   friend class VMStructs;
  42 
  43   // Noncopyable - not defined.
  44   PtrQueue(const PtrQueue&);
  45   PtrQueue& operator=(const PtrQueue&);
  46 
  47   // The ptr queue set to which this queue belongs.
  48   PtrQueueSet* const _qset;
  49 
  50   // Whether updates should be logged.
  51   bool _active;
  52 
  53   // If true, the queue is permanent, and doesn't need to deallocate
  54   // its buffer in the destructor (since that obtains a lock which may not
  55   // be legally locked by then).
  56   const bool _permanent;
  57 
  58 protected:
  59   // The buffer.
  60   void** _buf;
  61   // The (byte) index at which an object was last enqueued.  Starts at "_sz"
  62   // (indicating an empty buffer) and goes towards zero.
  63   size_t _index;
  64 
  65   // The (byte) size of the buffer.
  66   size_t _sz;
  67 





  68   // If there is a lock associated with this buffer, this is that lock.
  69   Mutex* _lock;
  70 
  71   PtrQueueSet* qset() { return _qset; }
  72   bool is_permanent() const { return _permanent; }
  73 
  74   // Process queue entries and release resources, if not permanent.
  75   void flush_impl();
  76 

  77   // Initialize this queue to contain a null buffer, and be part of the
  78   // given PtrQueueSet.
  79   PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
  80 
  81   // Requires queue flushed or permanent.
  82   ~PtrQueue();
  83 
  84 public:
  85 
  86   // Associate a lock with a ptr queue.
  87   void set_lock(Mutex* lock) { _lock = lock; }
  88 
  89   void reset() { if (_buf != NULL) _index = _sz; }
  90 
  91   void enqueue(volatile void* ptr) {
  92     enqueue((void*)(ptr));
  93   }
  94 
  95   // Enqueues the given "obj".
  96   void enqueue(void* ptr) {
  97     if (!_active) return;
  98     else enqueue_known_active(ptr);
  99   }
 100 
 101   // This method is called when we're doing the zero index handling
 102   // and gives a chance to the queues to do any pre-enqueueing
 103   // processing they might want to do on the buffer. It should return
 104   // true if the buffer should be enqueued, or false if enough
 105   // entries were cleared from it so that it can be re-used. It should


 117   }
 118 
 119   bool is_empty() {
 120     return _buf == NULL || _sz == _index;
 121   }
 122 
 123   // Set the "active" property of the queue to "b".  An enqueue to an
 124   // inactive thread is a no-op.  Setting a queue to inactive resets its
 125   // log to the empty state.
 126   void set_active(bool b) {
 127     _active = b;
 128     if (!b && _buf != NULL) {
 129       _index = _sz;
 130     } else if (b && _buf != NULL) {
 131       assert(_index == _sz, "invariant: queues are empty when activated.");
 132     }
 133   }
 134 
 135   bool is_active() { return _active; }
 136 
 137   static size_t byte_index_to_index(size_t ind) {
 138     assert((ind % sizeof(void*)) == 0, "Invariant.");
 139     return ind / sizeof(void*);




 140   }
 141 
 142   // To support compiler.
 143   static ByteSize byte_offset_of_index() {
 144     return byte_offset_of(PtrQueue, _index);
 145   }
 146   static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
 147 
 148   static ByteSize byte_offset_of_buf() {
 149     return byte_offset_of(PtrQueue, _buf);
 150   }
 151   static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
 152 
 153   static ByteSize byte_offset_of_active() {
 154     return byte_offset_of(PtrQueue, _active);
 155   }
 156   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 157 
 158 };
 159 
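Editor's note: the "Noncopyable - not defined" declarations added in this version use the pre-C++11 idiom of declaring the copy constructor and copy assignment operator with private access and never defining them. A short stand-alone illustration of that idiom follows; NoncopyableDemo is an invented name, not HotSpot code.

// The copy constructor and assignment operator are declared private and left
// undefined, so accidental copies fail to compile (or, from members and
// friends, fail to link).
class NoncopyableDemo {
private:
  NoncopyableDemo(const NoncopyableDemo&);             // intentionally not defined
  NoncopyableDemo& operator=(const NoncopyableDemo&);  // intentionally not defined
public:
  NoncopyableDemo() {}
};

int main() {
  NoncopyableDemo a;
  // NoncopyableDemo b(a);  // error: copy constructor is private
  // a = a;                 // error: assignment operator is private
  (void)a;
  return 0;
}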


 230   bool _notify_when_complete;
 231 
 232   // Maximum number of elements allowed on completed queue: after that,
 233   // enqueuer does the work itself.  Zero indicates no maximum.
 234   int _max_completed_queue;
 235   int _completed_queue_padding;
 236 
 237   int completed_buffers_list_length();
 238   void assert_completed_buffer_list_len_correct_locked();
 239   void assert_completed_buffer_list_len_correct();
 240 
 241 protected:
 242   // A mutator thread does the work of processing a buffer.
 243   // Returns "true" iff the work is complete (and the buffer may be
 244   // deallocated).
 245   virtual bool mut_process_buffer(void** buf) {
 246     ShouldNotReachHere();
 247     return false;
 248   }
 249 

 250   // Create an empty ptr queue set.
 251   PtrQueueSet(bool notify_when_complete = false);
 252   ~PtrQueueSet();
 253 
 254   // Because of init-order concerns, we can't pass these as constructor
 255   // arguments.
 256   void initialize(Monitor* cbl_mon,
 257                   Mutex* fl_lock,
 258                   int process_completed_threshold,
 259                   int max_completed_queue,
 260                   PtrQueueSet *fl_owner = NULL);
 261 
 262 public:






 263 
 264   // Return an empty array of size _sz (required to be non-zero).
 265   void** allocate_buffer();
 266 
 267   // Return an empty buffer to the free list.  The "buf" argument is
 268   // required to be a pointer to the head of an array of length "_sz".
 269   void deallocate_buffer(void** buf);
 270 
 271   // Declares that "buf" is a complete buffer.
 272   void enqueue_complete_buffer(void** buf, size_t index = 0);
 273 
 274   // To be invoked by the mutator.
 275   bool process_or_enqueue_complete_buffer(void** buf);
 276 
 277   bool completed_buffers_exist_dirty() {
 278     return _n_completed_buffers > 0;
 279   }
 280 
 281   bool process_completed_buffers() { return _process_completed; }
 282   void set_process_completed(bool x) { _process_completed = x; }
 283 
 284   bool is_active() { return _all_active; }
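Editor's note: the comment "Because of init-order concerns, we can't pass these as constructor arguments" describes a two-phase setup: construct the queue set first, then wire up the synchronization objects later via initialize(). The sketch below is a hedged, self-contained illustration of that pattern only; DemoQueueSet, DemoMonitor and DemoMutex are invented stand-ins, not the real Monitor/Mutex types.

#include <cassert>
#include <cstddef>

class DemoMonitor {};
class DemoMutex {};

// Hypothetical two-phase setup: the constructor leaves lock/monitor fields
// null because those objects may not exist yet; initialize() wires them up
// once they are available.
class DemoQueueSet {
  DemoMonitor* _cbl_mon;
  DemoMutex*   _fl_lock;
  int          _process_completed_threshold;
  int          _max_completed_queue;

public:
  DemoQueueSet()
    : _cbl_mon(NULL), _fl_lock(NULL),
      _process_completed_threshold(0), _max_completed_queue(0) {}

  void initialize(DemoMonitor* cbl_mon, DemoMutex* fl_lock,
                  int process_completed_threshold, int max_completed_queue) {
    assert(cbl_mon != NULL && fl_lock != NULL);
    _cbl_mon = cbl_mon;
    _fl_lock = fl_lock;
    _process_completed_threshold = process_completed_threshold;
    _max_completed_queue = max_completed_queue;
  }
};

int main() {
  DemoMonitor mon;
  DemoMutex lock;
  DemoQueueSet qs;              // constructed early, before locks may exist
  qs.initialize(&mon, &lock, 10, 0);
  return 0;
}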

