74 BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
75 G1DirtyCardQueueSet* dcqs = dirty_card_qset();
76 if (dcqs->process_or_enqueue_completed_buffer(node)) {
77 reset(); // Buffer fully processed, reset index.
78 } else {
79 allocate_buffer(); // Buffer enqueued, get a new one.
80 }
81 }
82
83 G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
84 PtrQueueSet(),
85 _cbl_mon(NULL),
86 _completed_buffers_head(NULL),
87 _completed_buffers_tail(NULL),
88 _num_entries_in_completed_buffers(0),
89 _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
90 _process_completed_buffers(false),
91 _notify_when_complete(notify_when_complete),
92 _max_completed_buffers(MaxCompletedBuffersUnlimited),
93 _completed_buffers_padding(0),
94 _free_ids(NULL),
95 _processed_buffers_mut(0),
96 _processed_buffers_rs_thread(0)
97 {
98 _all_active = true;
99 }
100
101 G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
102 abandon_completed_buffers();
103 delete _free_ids;
104 }
105
106 // Determines how many mutator threads can process the buffers in parallel.
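// The result also sizes the G1FreeIdSet from which mut_process_buffer
// claims a temporary worker id.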
107 uint G1DirtyCardQueueSet::num_par_ids() {
108 return (uint)os::initial_active_processor_count();
109 }
110
111 void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
112 BufferNode::Allocator* allocator,
113 bool init_free_ids) {
114 PtrQueueSet::initialize(allocator);
115 assert(_cbl_mon == NULL, "Init order issue?");
116 _cbl_mon = cbl_mon;
117 if (init_free_ids) {
118 _free_ids = new G1FreeIdSet(0, num_par_ids());
119 }
120 }
121
122 void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
123 G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
124 }
125
126 void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
127 MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
128 cbn->set_next(NULL);
129 if (_completed_buffers_tail == NULL) {
130 assert(_completed_buffers_head == NULL, "Well-formedness");
131 _completed_buffers_head = cbn;
132 _completed_buffers_tail = cbn;
133 } else {
134 _completed_buffers_tail->set_next(cbn);
135 _completed_buffers_tail = cbn;
136 }
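// Entries in a completed buffer occupy [index, buffer_size), so this
// adds the number of entries in the newly enqueued buffer.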
137 _num_entries_in_completed_buffers += buffer_size() - cbn->index();
138
139 if (!process_completed_buffers() &&
274
275 bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
276 if (Thread::current()->is_Java_thread()) {
277 // If the number of buffers exceeds the limit, make this Java
278 // thread do the processing itself. We don't lock to access
279 // buffer count or padding; it is fine to be imprecise here. The
280 // add of padding could overflow, which is treated as unlimited.
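// (On overflow, limit < max_buffers, the check below fails, and the
// buffer is simply enqueued.)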
281 size_t max_buffers = max_completed_buffers();
282 size_t limit = max_buffers + completed_buffers_padding();
283 if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
284 if (mut_process_buffer(node)) {
285 return true;
286 }
287 }
288 }
289 enqueue_completed_buffer(node);
290 return false;
291 }
292
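// Refine node's buffer on behalf of a mutator thread, using a
// temporarily claimed worker id.  Returns true only if the buffer was
// fully consumed; otherwise the caller enqueues the buffer instead.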
293 bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
294 guarantee(_free_ids != NULL, "must be");
295
296 uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
297 G1RefineCardConcurrentlyClosure cl;
298 bool result = apply_closure_to_buffer(&cl, node, worker_i);
299 _free_ids->release_par_id(worker_i); // release the id
300
301 if (result) {
302 assert_fully_consumed(node, buffer_size());
303 Atomic::inc(&_processed_buffers_mut);
304 }
305 return result;
306 }
307
308 bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
309 G1RefineCardConcurrentlyClosure cl;
310 return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
311 }
312
313 bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
314 assert_at_safepoint();
315 return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
74 BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
75 G1DirtyCardQueueSet* dcqs = dirty_card_qset();
76 if (dcqs->process_or_enqueue_completed_buffer(node)) {
77 reset(); // Buffer fully processed, reset index.
78 } else {
79 allocate_buffer(); // Buffer enqueued, get a new one.
80 }
81 }
82
83 G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
84 PtrQueueSet(),
85 _cbl_mon(NULL),
86 _completed_buffers_head(NULL),
87 _completed_buffers_tail(NULL),
88 _num_entries_in_completed_buffers(0),
89 _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
90 _process_completed_buffers(false),
91 _notify_when_complete(notify_when_complete),
92 _max_completed_buffers(MaxCompletedBuffersUnlimited),
93 _completed_buffers_padding(0),
94 _free_ids(new G1FreeIdSet(0, num_par_ids())),
95 _processed_buffers_mut(0),
96 _processed_buffers_rs_thread(0)
97 {
98 _all_active = true;
99 }
100
101 G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
102 abandon_completed_buffers();
103 delete _free_ids;
104 }
105
106 // Determines how many mutator threads can process the buffers in parallel.
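// The result also sizes the G1FreeIdSet from which mut_process_buffer
// claims a temporary worker id.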
107 uint G1DirtyCardQueueSet::num_par_ids() {
108 return (uint)os::initial_active_processor_count();
109 }
110
111 void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
112 BufferNode::Allocator* allocator) {
113 PtrQueueSet::initialize(allocator);
114 assert(_cbl_mon == NULL, "Init order issue?");
115 _cbl_mon = cbl_mon;
116 }
117
118 void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
119 G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
120 }
121
122 void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
123 MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
124 cbn->set_next(NULL);
125 if (_completed_buffers_tail == NULL) {
126 assert(_completed_buffers_head == NULL, "Well-formedness");
127 _completed_buffers_head = cbn;
128 _completed_buffers_tail = cbn;
129 } else {
130 _completed_buffers_tail->set_next(cbn);
131 _completed_buffers_tail = cbn;
132 }
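// Entries in a completed buffer occupy [index, buffer_size), so this
// adds the number of entries in the newly enqueued buffer.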
133 _num_entries_in_completed_buffers += buffer_size() - cbn->index();
134
135 if (!process_completed_buffers() &&
270
271 bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
272 if (Thread::current()->is_Java_thread()) {
273 // If the number of buffers exceeds the limit, make this Java
274 // thread do the processing itself. We don't lock to access
275 // buffer count or padding; it is fine to be imprecise here. The
276 // add of padding could overflow, which is treated as unlimited.
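// (On overflow, limit < max_buffers, the check below fails, and the
// buffer is simply enqueued.)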
277 size_t max_buffers = max_completed_buffers();
278 size_t limit = max_buffers + completed_buffers_padding();
279 if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
280 if (mut_process_buffer(node)) {
281 return true;
282 }
283 }
284 }
285 enqueue_completed_buffer(node);
286 return false;
287 }
288
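// Refine node's buffer on behalf of a mutator thread, using a
// temporarily claimed worker id.  Returns true only if the buffer was
// fully consumed; otherwise the caller enqueues the buffer instead.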
289 bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
290 uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
291 G1RefineCardConcurrentlyClosure cl;
292 bool result = apply_closure_to_buffer(&cl, node, worker_i);
293 _free_ids->release_par_id(worker_i); // release the id
294
295 if (result) {
296 assert_fully_consumed(node, buffer_size());
297 Atomic::inc(&_processed_buffers_mut);
298 }
299 return result;
300 }
301
302 bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
303 G1RefineCardConcurrentlyClosure cl;
304 return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
305 }
306
307 bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
308 assert_at_safepoint();
309 return apply_closure_to_completed_buffer(cl, worker_i, 0, true);