 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be executed only concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

// Represents a set of free small integer ids.
class FreeIdSet : public CHeapObj<mtGC> {
  enum {
    end_of_list = UINT_MAX,
    claimed = UINT_MAX - 1
  };

  uint _size;
  Monitor* _mon;

  uint* _ids;
  uint _hd;
  uint _waiters;
  uint _claimed;

public:
  FreeIdSet(uint size, Monitor* mon);
  ~FreeIdSet();

  // Returns an unclaimed parallel id (waiting for one to be released if

  // ...

  _claimed--;
  if (_waiters > 0) {
    _mon->notify_all();
  }
}

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

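// A non-permanent queue flushes itself on destruction; the shared, permanent
// queue is not flushed here.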
DirtyCardQueue::~DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
uint DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

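// Set up the queue set: delegate completed-buffer bookkeeping to PtrQueueSet,
// size buffers from G1UpdateBufferSize, give the shared queue its lock, and
// optionally create the FreeIdSet that hands out worker ids to mutator
// threads processing buffers.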
void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                   Mutex* fl_lock,
                                   int process_completed_threshold,
                                   int max_completed_queue,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}

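// Called when a Java thread's dirty card queue buffer has no room left
// (its index has reached zero); hands the work off to the thread-local queue.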
void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}

bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                BufferNode* node,
                                                bool consume,
                                                uint worker_i) {
  // ...
}

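// In debug builds, check that a buffer reported as fully processed really was
// consumed to the end (index == size); expands to nothing in product builds.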
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

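// Process one buffer on behalf of a mutator thread. A parallel worker id is
// claimed from the FreeIdSet for the duration of the refinement so the
// closure can be parameterized by worker id.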
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

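// Remove and return the head of the completed buffer list, or NULL if no
// more than stop_at completed buffers remain.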
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    nd = _completed_buffers_head;
    assert(_n_completed_buffers > 0, "Invariant");
    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}

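// Refine a single completed buffer concurrently with the mutator, leaving at
// least stop_at completed buffers on the list for later processing.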
bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

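// Apply cl to a single completed buffer during a GC pause. Every claimed
// buffer must be fully consumed; none may be returned to the list.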
bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint(false);
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

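// Claim one completed buffer (if more than stop_at remain) and apply cl to it.
// A fully consumed buffer is deallocated; a partially processed buffer, which
// is only permitted outside a pause, is put back on the completed list.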
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          uint worker_i,
                                                          size_t stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_complete_buffer(nd);