  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  if (dcqs->process_or_enqueue_completed_buffer(node)) {
    reset(); // Buffer fully processed, reset index.
  } else {
    allocate_buffer(); // Buffer enqueued, get a new one.
  }
}

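// Construct an empty, active queue set. The thresholds start at their
// "never" / "unlimited" sentinels; they are presumably tuned later via the
// setters, and initialize() is expected to run before the set is used.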
G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _num_entries_in_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),
  _completed_buffers_padding(0),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  abandon_completed_buffers();
  delete _free_ids;
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

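// One-time setup: install the monitor that guards the completed-buffer
// list and, when init_free_ids is set, create the id set that lets mutator
// threads claim a temporary worker id for in-place refinement (see
// mut_process_buffer below).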
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(allocator);
  assert(_cbl_mon == NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  if (init_free_ids) {
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}

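// Invoked when a thread's dirty card queue buffer fills up (PtrQueue
// indexes count down, so a zero index means a full buffer); delegates to
// that thread's queue to hand off the buffer.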
void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

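// Append cbn to the singly-linked completed-buffer list under _cbl_mon.
// The entry count is bumped by the buffer's used portion only; entries
// before cbn->index() are unused.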
void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _num_entries_in_completed_buffers += buffer_size() - cbn->index();

  if (!process_completed_buffers() &&
      // ... (remainder of this function and the intervening code elided) ...

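// Decide whether the current thread refines the buffer itself or defers to
// the concurrent refinement threads by enqueueing it. Only Java threads
// refine in place, and only when the completed-buffer count exceeds the
// configured maximum plus padding.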
bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself. We don't lock to access
    // buffer count or padding; it is fine to be imprecise here. The
    // add of padding could overflow, which is treated as unlimited.
    size_t max_buffers = max_completed_buffers();
    size_t limit = max_buffers + completed_buffers_padding();
    if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
      if (mut_process_buffer(node)) {
        return true;
      }
    }
  }
  enqueue_completed_buffer(node);
  return false;
}

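// Refine a buffer on the calling Java thread. A parallel worker id is
// claimed from _free_ids for the duration, since refinement needs a
// distinct worker id; the id is released whether or not the whole buffer
// was consumed.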
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

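// Entry point for the concurrent refinement threads: refine one completed
// buffer, passing stop_at through to apply_closure_to_completed_buffer as
// what appears to be a low-water mark of completed buffers below which no
// buffer is claimed.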
bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

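// GC-pause variant: must run at a safepoint, uses stop_at == 0 (no
// low-water mark) and flags the call as happening during a pause.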
bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                                            uint worker_i,
                                                            // ... (remaining parameters and body elided) ...