16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
26 #define SHARE_VM_GC_G1_PTRQUEUE_HPP
27
28 #include "memory/allocation.hpp"
29 #include "utilities/sizes.hpp"
30
31 // There are various techniques that require threads to be able to log
32 // addresses. For example, a generational write barrier might log
33 // the addresses of modified old-generation objects. This type supports
34 // this operation.
35
36 class PtrQueueSet;
37 class PtrQueue VALUE_OBJ_CLASS_SPEC {
38 friend class VMStructs;
39
40 // Noncopyable - not defined.
41 PtrQueue(const PtrQueue&);
42 PtrQueue& operator=(const PtrQueue&);
43
44 // The ptr queue set to which this queue belongs.
45 PtrQueueSet* const _qset;
46
47 // Whether updates should be logged.
48 bool _active;
49
// If true, the queue is permanent, and doesn't need to deallocate
// its buffer in the destructor (since that obtains a lock which may not
// be legally locked by then).
53 const bool _permanent;
54
55 protected:
87
// Enqueue "ptr", first stripping the volatile qualifier.
void enqueue(volatile void* ptr) {
  enqueue(const_cast<void*>(ptr));
}
91
92 // Enqueues the given "obj".
93 void enqueue(void* ptr) {
94 if (!_active) return;
95 else enqueue_known_active(ptr);
96 }
97
// This method is called when we're doing the zero index handling
// and gives a chance to the queues to do any pre-enqueueing
// processing they might want to do on the buffer. It should return
// true if the buffer should be enqueued, or false if enough
// entries were cleared from it so that it can be re-used. It should
// not return false if the buffer is still full (otherwise we can
// get into an infinite loop).
virtual bool should_enqueue_buffer() { return true; }
// Called when an enqueue finds the buffer full (index zero); defined in the .cpp.
void handle_zero_index();
// Hand the completed buffer "buf" to the qset; the name suggests a lock is
// taken -- see the .cpp for the exact locking protocol.
void locking_enqueue_completed_buffer(void** buf);

// Enqueue "ptr" without re-checking _active; callers must have done so.
void enqueue_known_active(void* ptr);
110
111 size_t size() {
112 assert(_sz >= _index, "Invariant.");
113 return _buf == NULL ? 0 : _sz - _index;
114 }
115
116 bool is_empty() {
117 return _buf == NULL || _sz == _index;
118 }
119
120 // Set the "active" property of the queue to "b". An enqueue to an
121 // inactive thread is a no-op. Setting a queue to inactive resets its
122 // log to the empty state.
123 void set_active(bool b) {
124 _active = b;
125 if (!b && _buf != NULL) {
126 _index = _sz;
127 } else if (b && _buf != NULL) {
128 assert(_index == _sz, "invariant: queues are empty when activated.");
129 }
130 }
131
// Whether enqueues are currently being logged.
bool is_active() { return _active; }

// Convert a byte offset within the buffer into an element (void*) index.
// The offset must be pointer-size aligned.
static size_t byte_index_to_index(size_t ind) {
  assert((ind % sizeof(void*)) == 0, "Invariant.");
  return ind / sizeof(void*);
}
138
// To support compiler.
// The helpers below expose field offsets/widths so generated barrier
// code can manipulate the queue fields directly. They are templated on
// the concrete queue type Derived -- presumably because byte_offset_of
// requires the complete type; TODO confirm (field declarations are
// outside this view).

protected:
// Byte offset of the _index field within Derived.
template<typename Derived>
static ByteSize byte_offset_of_index() {
  return byte_offset_of(Derived, _index);
}

// Width in bytes of the _index field.
static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

// Byte offset of the _buf field within Derived.
template<typename Derived>
static ByteSize byte_offset_of_buf() {
  return byte_offset_of(Derived, _buf);
}

// Width in bytes of the _buf field.
static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
155
156 template<typename Derived>
157 static ByteSize byte_offset_of_active() {
158 return byte_offset_of(Derived, _active);
169
// Nodes start empty (index 0) and unlinked.
BufferNode() : _index(0), _next(NULL) { }
// Empty; node storage is reclaimed via deallocate().
~BufferNode() { }

// Byte offset of the _buffer field, used to convert between a node
// pointer and its buffer pointer (see make_*_from_* below).
static size_t buffer_offset() {
  return offset_of(BufferNode, _buffer);
}
176
public:
BufferNode* next() const { return _next; }   // Intrusive list link (e.g. a qset's completed-buffer list).
void set_next(BufferNode* n) { _next = n; }
size_t index() const { return _index; }      // Saved index into the associated buffer.
void set_index(size_t i) { _index = i; }
182
// Allocate a new BufferNode with the "buffer" having size bytes.
static BufferNode* allocate(size_t byte_size);

// Free a BufferNode.
static void deallocate(BufferNode* node);

// Return the BufferNode containing the buffer: the buffer lies at a
// fixed offset within its node, so step back by that offset.
static BufferNode* make_node_from_buffer(void** buffer) {
  return reinterpret_cast<BufferNode*>(
    reinterpret_cast<char*>(buffer) - buffer_offset());
}

// Return the buffer for node (the inverse of make_node_from_buffer).
static void** make_buffer_from_node(BufferNode *node) {
  // &_buffer[0] might lead to index out of bounds warnings.
  return reinterpret_cast<void**>(
    reinterpret_cast<char*>(node) + buffer_offset());
}
201 };
202
203 // A PtrQueueSet represents resources common to a set of pointer queues.
204 // In particular, the individual queues allocate buffers from this shared
205 // set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
207 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
208 protected:
209 Monitor* _cbl_mon; // Protects the fields below.
210 BufferNode* _completed_buffers_head;
211 BufferNode* _completed_buffers_tail;
212 size_t _n_completed_buffers;
226 size_t _sz;
227
228 bool _all_active;
229
230 // If true, notify_all on _cbl_mon when the threshold is reached.
231 bool _notify_when_complete;
232
233 // Maximum number of elements allowed on completed queue: after that,
234 // enqueuer does the work itself. Zero indicates no maximum.
235 int _max_completed_queue;
236 size_t _completed_queue_padding;
237
238 size_t completed_buffers_list_length();
239 void assert_completed_buffer_list_len_correct_locked();
240 void assert_completed_buffer_list_len_correct();
241
242 protected:
// A mutator thread does the work of processing a buffer.
244 // Returns "true" iff the work is complete (and the buffer may be
245 // deallocated).
virtual bool mut_process_buffer(void** buf) {
  ShouldNotReachHere(); // Base implementation must never run; qsets that let
                        // mutators process buffers are expected to override.
  return false;
}
250
251 // Create an empty ptr queue set.
252 PtrQueueSet(bool notify_when_complete = false);
253 ~PtrQueueSet();
254
255 // Because of init-order concerns, we can't pass these as constructor
256 // arguments.
257 void initialize(Monitor* cbl_mon,
258 Mutex* fl_lock,
259 int process_completed_threshold,
260 int max_completed_queue,
261 PtrQueueSet *fl_owner = NULL);
262
public:

// Return an empty array of size _sz (required to be non-zero).
void** allocate_buffer();

// Return an empty buffer to the free list. The "buf" argument is
// required to be a pointer to the head of an array of length "_sz".
void deallocate_buffer(void** buf);

// Declares that "buf" is a complete buffer; "index" is the position of
// the first live entry in it.
void enqueue_complete_buffer(void** buf, size_t index = 0);

// To be invoked by the mutator. Return semantics are defined in the
// .cpp -- presumably mirrors mut_process_buffer; TODO confirm.
bool process_or_enqueue_complete_buffer(void** buf);

// True if any completed buffers are awaiting processing.
bool completed_buffers_exist_dirty() {
  return _n_completed_buffers > 0;
}

// Get/set the flag saying whether completed buffers should be processed.
bool process_completed_buffers() { return _process_completed; }
void set_process_completed(bool x) { _process_completed = x; }

// Whether the queues in this set are active.
bool is_active() { return _all_active; }

// Set the buffer size. Should be called before any "enqueue" operation
// can be called. And should only be called once.
void set_buffer_size(size_t sz);

// Get the buffer size (_sz).
size_t buffer_size() { return _sz; }

// Get/Set the number of completed buffers that triggers log processing.
void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
int process_completed_threshold() const { return _process_completed_threshold; }
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
26 #define SHARE_VM_GC_G1_PTRQUEUE_HPP
27
28 #include "memory/allocation.hpp"
29 #include "utilities/sizes.hpp"
30
31 // There are various techniques that require threads to be able to log
32 // addresses. For example, a generational write barrier might log
33 // the addresses of modified old-generation objects. This type supports
34 // this operation.
35
36 class BufferNode;
37 class PtrQueueSet;
38 class PtrQueue VALUE_OBJ_CLASS_SPEC {
39 friend class VMStructs;
40
41 // Noncopyable - not defined.
42 PtrQueue(const PtrQueue&);
43 PtrQueue& operator=(const PtrQueue&);
44
45 // The ptr queue set to which this queue belongs.
46 PtrQueueSet* const _qset;
47
48 // Whether updates should be logged.
49 bool _active;
50
// If true, the queue is permanent, and doesn't need to deallocate
// its buffer in the destructor (since that obtains a lock which may not
// be legally locked by then).
54 const bool _permanent;
55
56 protected:
88
// Enqueue "ptr", first stripping the volatile qualifier.
void enqueue(volatile void* ptr) {
  enqueue(const_cast<void*>(ptr));
}
92
93 // Enqueues the given "obj".
94 void enqueue(void* ptr) {
95 if (!_active) return;
96 else enqueue_known_active(ptr);
97 }
98
// This method is called when we're doing the zero index handling
// and gives a chance to the queues to do any pre-enqueueing
// processing they might want to do on the buffer. It should return
// true if the buffer should be enqueued, or false if enough
// entries were cleared from it so that it can be re-used. It should
// not return false if the buffer is still full (otherwise we can
// get into an infinite loop).
virtual bool should_enqueue_buffer() { return true; }
// Called when an enqueue finds the buffer full (index zero); defined in the .cpp.
void handle_zero_index();
// Hand the completed node to the qset; the name suggests a lock is
// taken -- see the .cpp for the exact locking protocol.
void locking_enqueue_completed_buffer(BufferNode* node);

// Enqueue "ptr" without re-checking _active; callers must have done so.
void enqueue_known_active(void* ptr);
111
112 size_t size() {
113 assert(_sz >= _index, "Invariant.");
114 return _buf == NULL ? 0 : _sz - _index;
115 }
116
117 bool is_empty() {
118 return _buf == NULL || _sz == _index;
119 }
120
121 // Set the "active" property of the queue to "b". An enqueue to an
122 // inactive thread is a no-op. Setting a queue to inactive resets its
123 // log to the empty state.
124 void set_active(bool b) {
125 _active = b;
126 if (!b && _buf != NULL) {
127 _index = _sz;
128 } else if (b && _buf != NULL) {
129 assert(_index == _sz, "invariant: queues are empty when activated.");
130 }
131 }
132
// Whether enqueues are currently being logged.
bool is_active() { return _active; }

// Convert a byte offset within the buffer into an element (void*) index.
// The offset must be pointer-size aligned.
static size_t byte_index_to_index(size_t ind) {
  assert((ind % sizeof(void*)) == 0, "Invariant.");
  return ind / sizeof(void*);
}
139
// Inverse of byte_index_to_index: convert an element (void*) count
// into a byte offset.
static size_t index_to_byte_index(size_t ind) {
  const size_t bytes_per_element = sizeof(void*);
  return ind * bytes_per_element;
}
143
// To support compiler.
// The helpers below expose field offsets/widths so generated barrier
// code can manipulate the queue fields directly. They are templated on
// the concrete queue type Derived -- presumably because byte_offset_of
// requires the complete type; TODO confirm (field declarations are
// outside this view).

protected:
// Byte offset of the _index field within Derived.
template<typename Derived>
static ByteSize byte_offset_of_index() {
  return byte_offset_of(Derived, _index);
}

// Width in bytes of the _index field.
static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

// Byte offset of the _buf field within Derived.
template<typename Derived>
static ByteSize byte_offset_of_buf() {
  return byte_offset_of(Derived, _buf);
}

// Width in bytes of the _buf field.
static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
160
161 template<typename Derived>
162 static ByteSize byte_offset_of_active() {
163 return byte_offset_of(Derived, _active);
174
// Nodes start empty (index 0) and unlinked.
BufferNode() : _index(0), _next(NULL) { }
// Empty; node storage is reclaimed via deallocate().
~BufferNode() { }

// Byte offset of the _buffer field, used to convert between a node
// pointer and its buffer pointer (see make_*_from_* below).
static size_t buffer_offset() {
  return offset_of(BufferNode, _buffer);
}
181
public:
BufferNode* next() const { return _next; }   // Intrusive list link (e.g. a qset's completed-buffer list).
void set_next(BufferNode* n) { _next = n; }
size_t index() const { return _index; }      // Saved index into the associated buffer.
void set_index(size_t i) { _index = i; }
187
188 // Allocate a new BufferNode with the "buffer" having size bytes.
189 static BufferNode* allocate(size_t byte_size);
190
191 // Free a BufferNode.
192 static void deallocate(BufferNode* node);
193
194 // Return the BufferNode containing the buffer, after setting its index.
195 static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
196 BufferNode* node =
197 reinterpret_cast<BufferNode*>(
198 reinterpret_cast<char*>(buffer) - buffer_offset());
199 node->set_index(index);
200 return node;
201 }
202
// Return the buffer for node (the inverse of make_node_from_buffer).
static void** make_buffer_from_node(BufferNode *node) {
  // &_buffer[0] might lead to index out of bounds warnings.
  return reinterpret_cast<void**>(
    reinterpret_cast<char*>(node) + buffer_offset());
}
209 };
210
211 // A PtrQueueSet represents resources common to a set of pointer queues.
212 // In particular, the individual queues allocate buffers from this shared
213 // set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
215 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
216 protected:
217 Monitor* _cbl_mon; // Protects the fields below.
218 BufferNode* _completed_buffers_head;
219 BufferNode* _completed_buffers_tail;
220 size_t _n_completed_buffers;
234 size_t _sz;
235
236 bool _all_active;
237
238 // If true, notify_all on _cbl_mon when the threshold is reached.
239 bool _notify_when_complete;
240
241 // Maximum number of elements allowed on completed queue: after that,
242 // enqueuer does the work itself. Zero indicates no maximum.
243 int _max_completed_queue;
244 size_t _completed_queue_padding;
245
246 size_t completed_buffers_list_length();
247 void assert_completed_buffer_list_len_correct_locked();
248 void assert_completed_buffer_list_len_correct();
249
250 protected:
// A mutator thread does the work of processing a buffer.
252 // Returns "true" iff the work is complete (and the buffer may be
253 // deallocated).
virtual bool mut_process_buffer(BufferNode* node) {
  ShouldNotReachHere(); // Base implementation must never run; qsets that let
                        // mutators process buffers are expected to override.
  return false;
}
258
259 // Create an empty ptr queue set.
260 PtrQueueSet(bool notify_when_complete = false);
261 ~PtrQueueSet();
262
263 // Because of init-order concerns, we can't pass these as constructor
264 // arguments.
265 void initialize(Monitor* cbl_mon,
266 Mutex* fl_lock,
267 int process_completed_threshold,
268 int max_completed_queue,
269 PtrQueueSet *fl_owner = NULL);
270
public:

// Return an empty array of size _sz (required to be non-zero).
void** allocate_buffer();

// Return an empty buffer to the free list. The "buf" argument is
// required to be a pointer to the head of an array of length "_sz".
void deallocate_buffer(BufferNode* node);

// Declares that the node's buffer is a complete buffer.
void enqueue_complete_buffer(BufferNode* node);

// To be invoked by the mutator. Return semantics are defined in the
// .cpp -- presumably mirrors mut_process_buffer; TODO confirm.
bool process_or_enqueue_complete_buffer(BufferNode* node);

// True if any completed buffers are awaiting processing.
bool completed_buffers_exist_dirty() {
  return _n_completed_buffers > 0;
}

// Get/set the flag saying whether completed buffers should be processed.
bool process_completed_buffers() { return _process_completed; }
void set_process_completed(bool x) { _process_completed = x; }

// Whether the queues in this set are active.
bool is_active() { return _all_active; }

// Set the buffer size. Should be called before any "enqueue" operation
// can be called. And should only be called once.
void set_buffer_size(size_t sz);

// Get the buffer size (_sz).
size_t buffer_size() { return _sz; }

// Get/Set the number of completed buffers that triggers log processing.
void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
int process_completed_threshold() const { return _process_completed_threshold; }
|