
src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp

rev 55389 : [mq]: satb_lock_free


  59 
  60   // Process queue entries and release resources.
  61   void flush() { flush_impl(); }
  62 
  63   inline G1DirtyCardQueueSet* dirty_card_qset() const;
  64 
  65   // Compiler support.
  66   static ByteSize byte_offset_of_index() {
  67     return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  68   }
  69   using PtrQueue::byte_width_of_index;
  70 
  71   static ByteSize byte_offset_of_buf() {
  72     return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  73   }
  74   using PtrQueue::byte_width_of_buf;
  75 
  76 };
  77 
  78 class G1DirtyCardQueueSet: public PtrQueueSet {
  79   // Apply the closure to the elements of "node" from its index to
  80   // buffer_size.  If all closure applications return true, then
  81   // returns true.  Stops processing after the first closure
  82   // application that returns false, and returns false from this
  83   // function.  If "consume" is true, the node's index is updated to
  84   // exclude the processed elements, e.g. up to the element for which
  85   // the closure returned false.
  86   bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
  87                                BufferNode* node,
  88                                bool consume,
  89                                uint worker_i = 0);
  90 
  91   // If there are more than stop_at completed buffers, pop one, apply
  92   // the specified closure to its active elements, and return true.
  93   // Otherwise return false.
  94   //
  95   // A completely processed buffer is freed.  However, if a closure
  96   // invocation returns false, processing is stopped and the partially
  97   // processed buffer (with its index updated to exclude the processed
  98   // elements, e.g. up to the element for which the closure returned
  99   // false) is returned to the completed buffer set.
 100   //
 101   // If during_pause is true, stop_at must be zero, and the closure
 102   // must never return false.
 103   bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
 104                                          uint worker_i,
 105                                          size_t stop_at,
 106                                          bool during_pause);
 107 
 108   bool mut_process_buffer(BufferNode* node);
 109 
 110   // If the queue contains more buffers than configured here, the
 111   // mutator must start doing some of the concurrent refinement work.
 112   size_t _max_completed_buffers;
 113   size_t _completed_buffers_padding;
 114   static const size_t MaxCompletedBuffersUnlimited = ~size_t(0);
 115 
 116   G1FreeIdSet* _free_ids;
 117 
 118   // The number of completed buffers processed by mutator and rs thread,
 119   // respectively.
 120   jint _processed_buffers_mut;
 121   jint _processed_buffers_rs_thread;
 122 
 123   // Current buffer node used for parallel iteration.
 124   BufferNode* volatile _cur_par_buffer_node;
 125 
 126 public:
 127   G1DirtyCardQueueSet(bool notify_when_complete = true);
 128   ~G1DirtyCardQueueSet();
 129 
 130   void initialize(Monitor* cbl_mon,
 131                   BufferNode::Allocator* allocator,
 132                   bool init_free_ids = false);
 133 
 134   // The number of parallel ids that can be claimed to allow collector or
 135   // mutator threads to do card-processing work.
 136   static uint num_par_ids();
 137 
 138   static void handle_zero_index_for_thread(Thread* t);
 139 
 140   // Either process the entire buffer and return true, or enqueue the
 141   // buffer and return false.  If the buffer is completely processed,
 142   // it can be reused in place.
 143   bool process_or_enqueue_completed_buffer(BufferNode* node);
 144 
 144 
 145   // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
 146   // completed buffers remaining.
 147   bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
 148 
 149   // Apply the given closure to all completed buffers. The given closure's do_card_ptr
 150   // must never return false. Must only be called during GC.
 151   bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
 152 
 153   void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
 154   // Applies the given closure to all completed buffers, non-consumptively.
 155   // Can be used in parallel, all callers using the iteration state initialized
 156   // by reset_for_par_iteration.
 157   void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
 158 
 159   // If a full collection is happening, reset partial logs, and ignore
 160   // completed ones: the full collection will make them all irrelevant.
 161   void abandon_logs();
 162 
 163   // If any threads have partial logs, add them to the global list of logs.
 164   void concatenate_logs();
 165 
 166   void set_max_completed_buffers(size_t m) {
 167     _max_completed_buffers = m;
 168   }
 169   size_t max_completed_buffers() const {
 170     return _max_completed_buffers;
 171   }
 172 
 173   void set_completed_buffers_padding(size_t padding) {
 174     _completed_buffers_padding = padding;
 175   }
 176   size_t completed_buffers_padding() const {
 177     return _completed_buffers_padding;
 178   }
 179 


  59 
  60   // Process queue entries and release resources.
  61   void flush() { flush_impl(); }
  62 
  63   inline G1DirtyCardQueueSet* dirty_card_qset() const;
  64 
  65   // Compiler support.
  66   static ByteSize byte_offset_of_index() {
  67     return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  68   }
  69   using PtrQueue::byte_width_of_index;
  70 
  71   static ByteSize byte_offset_of_buf() {
  72     return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  73   }
  74   using PtrQueue::byte_width_of_buf;
  75 
  76 };
  77 
  78 class G1DirtyCardQueueSet: public PtrQueueSet {
  79   Monitor* _cbl_mon;  // Protects the fields below.
  80   BufferNode* _completed_buffers_head;
  81   BufferNode* _completed_buffers_tail;
  82   volatile size_t _n_completed_buffers;
  83 
  84   size_t _process_completed_buffers_threshold;
  85   volatile bool _process_completed_buffers;
  86 
  87   // If true, notify_all on _cbl_mon when the threshold is reached.
  88   bool _notify_when_complete;
  89 
  90   void assert_completed_buffers_list_len_correct_locked() NOT_DEBUG_RETURN;
  91 
  92   void abandon_completed_buffers();
  93 
  94   // Apply the closure to the elements of "node" from its index to
  95   // buffer_size.  If all closure applications return true, then
  96   // returns true.  Stops processing after the first closure
  97   // application that returns false, and returns false from this
  98   // function.  If "consume" is true, the node's index is updated to
  99   // exclude the processed elements, e.g. up to the element for which
 100   // the closure returned false.
 101   bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
 102                                BufferNode* node,
 103                                bool consume,
 104                                uint worker_i = 0);
 105 
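The "consume" contract above can be pictured with a short sketch (an illustration, not the actual implementation; entry_at() is a hypothetical accessor for the node's backing array):

    size_t i = node->index();            // first unprocessed element
    const size_t limit = buffer_size();  // one past the last element
    for ( ; i < limit; ++i) {
      if (!cl->do_card_ptr(entry_at(node, i), worker_i)) {
        break;                           // first false stops processing
      }
    }
    if (consume) {
      node->set_index(i);                // exclude the processed elements
    }
    return i == limit;                   // true iff all applications returned true
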
 106   // If there are more than stop_at completed buffers, pop one, apply
 107   // the specified closure to its active elements, and return true.
 108   // Otherwise return false.
 109   //
 110   // A completely processed buffer is freed.  However, if a closure
 111   // invocation returns false, processing is stopped and the partially
 112   // processed buffer (with its index updated to exclude the processed
 113   // elements, e.g. up to the element for which the closure returned
 114   // false) is returned to the completed buffer set.
 115   //
 116   // If during_pause is true, stop_at must be zero, and the closure
 117   // must never return false.
 118   bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
 119                                          uint worker_i,
 120                                          size_t stop_at,
 121                                          bool during_pause);
 122 
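Combining the declarations in this class, the documented behavior plausibly reduces to the following shape (a sketch; locking and statistics are omitted, and deallocate_buffer() is assumed to be inherited from PtrQueueSet):

    BufferNode* node = get_completed_buffer(stop_at);
    if (node == NULL) return false;          // not more than stop_at remain
    if (apply_closure_to_buffer(cl, node, true, worker_i)) {
      deallocate_buffer(node);               // completely processed buffer is freed
    } else {
      guarantee(!during_pause, "closure should not fail during a pause");
      enqueue_completed_buffer(node);        // return partially processed buffer
    }
    return true;
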
 123   bool mut_process_buffer(BufferNode* node);
 124 
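mut_process_buffer() carries no comment here; a plausible reading, based on the _free_ids and statistics fields below, is that a mutator temporarily claims a parallel worker id, refines its own buffer, and releases the id (a sketch; the exact G1FreeIdSet method names are assumptions):

    uint worker_i = _free_ids->claim_par_id();   // assumed API; may block
    G1RefineCardConcurrentlyClosure cl;          // closure named in a comment below
    bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
    _free_ids->release_par_id(worker_i);         // assumed API
    if (result) {
      Atomic::inc(&_processed_buffers_mut);      // update mutator statistics
    }
    return result;
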
 125   // If the queue contains more buffers than configured here, the
 126   // mutator must start doing some of the concurrent refinement work.
 127   size_t _max_completed_buffers;
 128   size_t _completed_buffers_padding;
 129   static const size_t MaxCompletedBuffersUnlimited = SIZE_MAX;
 130 
 131   G1FreeIdSet* _free_ids;
 132 
 133   // The number of completed buffers processed by mutator and rs thread,
 134   // respectively.
 135   jint _processed_buffers_mut;
 136   jint _processed_buffers_rs_thread;
 137 
 138   // Current buffer node used for parallel iteration.
 139   BufferNode* volatile _cur_par_buffer_node;
 140 
 141 public:
 142   G1DirtyCardQueueSet(bool notify_when_complete = true);
 143   ~G1DirtyCardQueueSet();
 144 
 145   void initialize(Monitor* cbl_mon,
 146                   BufferNode::Allocator* allocator,
 147                   bool init_free_ids = false);
 148 
 149   // The number of parallel ids that can be claimed to allow collector or
 150   // mutator threads to do card-processing work.
 151   static uint num_par_ids();
 152 
 153   static void handle_zero_index_for_thread(Thread* t);
 154 
 155   // Either process the entire buffer and return true, or enqueue the
 156   // buffer and return false.  If the buffer is completely processed,
 157   // it can be reused in place.
 158   bool process_or_enqueue_completed_buffer(BufferNode* node);
 159 
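Given _max_completed_buffers and _completed_buffers_padding above, the decision is plausibly along these lines (a sketch; the unlimited check and the exact threshold arithmetic are assumptions):

    if (max_completed_buffers() != MaxCompletedBuffersUnlimited &&
        completed_buffers_num() > max_completed_buffers() + completed_buffers_padding()) {
      // Far enough over the limit: make the mutator refine the buffer itself.
      if (mut_process_buffer(node)) {
        return true;                     // fully processed; reusable in place
      }
    }
    enqueue_completed_buffer(node);      // hand off to concurrent refinement
    return false;
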
 160   virtual void enqueue_completed_buffer(BufferNode* node);
 161 
 162   // If the number of completed buffers is > stop_at, then remove and
 163   // return a completed buffer from the list.  Otherwise, return NULL.
 164   BufferNode* get_completed_buffer(size_t stop_at = 0);
 165 
 166   // The number of buffers in the list.  Racy...
 167   size_t completed_buffers_num() const { return _n_completed_buffers; }
 168 
 169   bool process_completed_buffers() { return _process_completed_buffers; }
 170   void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
 171 
 172   // Get/Set the number of completed buffers that triggers log processing.
 173   // Log processing should be done when the number of buffers exceeds the
 174   // threshold.
 175   void set_process_completed_buffers_threshold(size_t sz) {
 176     _process_completed_buffers_threshold = sz;
 177   }
 178   size_t process_completed_buffers_threshold() const {
 179     return _process_completed_buffers_threshold;
 180   }
 181   static const size_t ProcessCompletedBuffersThresholdNever = SIZE_MAX;
 182 
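As a usage sketch, a refinement controller might wire these knobs together as follows (dcqs, green_zone and red_zone are illustrative names, not part of this class):

    dcqs->set_process_completed_buffers_threshold(green_zone); // start refining here
    dcqs->set_max_completed_buffers(red_zone);                 // mutators help past here
    dcqs->set_completed_buffers_padding(red_zone / 10);        // slack before they must
    // Or never trigger processing on a threshold:
    dcqs->set_process_completed_buffers_threshold(
        G1DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
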
 183   // Notify the consumer if the number of buffers crossed the threshold.
 184   void notify_if_necessary();
 185 
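Given the _cbl_mon, _process_completed_buffers and _notify_when_complete fields above, notify_if_necessary() plausibly looks like this (a sketch; it assumes the caller holds _cbl_mon):

    assert_lock_strong(_cbl_mon);
    if (_n_completed_buffers >= _process_completed_buffers_threshold) {
      _process_completed_buffers = true;
      if (_notify_when_complete) {
        _cbl_mon->notify_all();          // wake up refinement consumers
      }
    }
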
 186   void merge_bufferlists(G1DirtyCardQueueSet* src);
 187 
 188   // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
 189   // completed buffers remaining.
 190   bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
 191 
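This reads as a thin public wrapper over the private apply_closure_to_completed_buffer() above (a sketch, assuming G1RefineCardConcurrentlyClosure is default-constructible):

    G1RefineCardConcurrentlyClosure cl;
    return apply_closure_to_completed_buffer(&cl, worker_i, stop_at,
                                             false /* during_pause */);
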
 192   // Apply the given closure to all completed buffers. The given closure's do_card_ptr
 193   // must never return false. Must only be called during GC.
 194   bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
 195 
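A conforming GC-time closure would unconditionally return true from do_card_ptr (a hypothetical sketch; the do_card_ptr signature is an assumption, inferred from the worker_i parameters threaded through this header):

    class AlwaysTrueCardClosure : public G1CardTableEntryClosure {
    public:
      // Signature assumed; see G1CardTableEntryClosure for the actual one.
      virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
        process_card(card_ptr);   // hypothetical per-card work
        return true;              // contract: never false during GC
      }
    };
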
 196   void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
 197   // Applies the given closure to all completed buffers, non-consumptively.
 198   // Can be used in parallel, all callers using the iteration state initialized
 199   // by reset_for_par_iteration.
 200   void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
 201 
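The intended parallel pattern, per the comment above, is that one thread resets the iteration state and then any number of workers apply the same closure (a usage sketch; dcqs and cl are illustrative names):

    dcqs->reset_for_par_iteration();                         // single-threaded setup
    // then, concurrently in each parallel worker:
    dcqs->par_apply_closure_to_all_completed_buffers(&cl);   // same cl in every worker
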
 202   // If a full collection is happening, reset partial logs, and release
 203   // completed ones: the full collection will make them all irrelevant.
 204   void abandon_logs();
 205 
 206   // If any threads have partial logs, add them to the global list of logs.
 207   void concatenate_logs();
 208 
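The pairing suggested by these comments is safepoint-scoped (a usage sketch; the surrounding phases are assumptions):

    // Before a collection processes cards:
    dcqs->concatenate_logs();   // move per-thread partial buffers to the global list
    // In a full collection, where the cards become irrelevant:
    dcqs->abandon_logs();       // reset partial logs, release completed buffers
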
 209   void set_max_completed_buffers(size_t m) {
 210     _max_completed_buffers = m;
 211   }
 212   size_t max_completed_buffers() const {
 213     return _max_completed_buffers;
 214   }
 215 
 216   void set_completed_buffers_padding(size_t padding) {
 217     _completed_buffers_padding = padding;
 218   }
 219   size_t completed_buffers_padding() const {
 220     return _completed_buffers_padding;
 221   }
 222 