39 #include "runtime/threadSMR.hpp"
40
41 // Closure used for updating remembered sets and recording references that
42 // point into the collection set while the mutator is running.
43 // Assumed to be only executed concurrently with the mutator. Yields via
44 // SuspendibleThreadSet after every card.
45 class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
46 public:
47 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
48 G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);
49
50 if (SuspendibleThreadSet::should_yield()) {
51 // Caller will actually yield.
52 return false;
53 }
54 // Otherwise, we finished successfully; return true.
55 return true;
56 }
57 };
58
// Construct a dirty card queue belonging to the given qset.
// Dirty card queues are always active, so we create them with their
// active field set to true.
G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent) :
  PtrQueue(qset, permanent, true /* active */)
{ }
64
65 G1DirtyCardQueue::~G1DirtyCardQueue() {
66 if (!is_permanent()) {
67 flush();
68 }
69 }
70
G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),                 // allocated lazily in initialize() if requested
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  // Dirty card queues are always active: mutators log cards unconditionally.
  _all_active = true;
}
81
G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  // _free_ids is only allocated when initialize() is called with
  // init_free_ids = true; deleting NULL is harmless otherwise.
  delete _free_ids;
}
85
86 // Determines how many mutator threads can process the buffers in parallel.
87 uint G1DirtyCardQueueSet::num_par_ids() {
88 return (uint)os::initial_active_processor_count();
89 }
90
// Set up the queue set: forward the monitor/allocator to the base class,
// give the shared queue its flush lock, and optionally allocate the set of
// free ids used to hand out parallel worker ids to mutator threads.
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     Mutex* lock,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // Ids range over [0, num_par_ids()).
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}
101
102 void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
103 G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
104 }
105
106 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
107 BufferNode* node,
108 bool consume,
109 uint worker_i) {
110 if (cl == NULL) return true;
111 bool result = true;
112 void** buf = BufferNode::make_buffer_from_node(node);
113 size_t i = node->index();
114 size_t limit = buffer_size();
115 for ( ; i < limit; ++i) {
116 jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
200 nd = next;
201 } else {
202 nd = actual;
203 }
204 }
205 }
206
207 void G1DirtyCardQueueSet::abandon_logs() {
208 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
209 abandon_completed_buffers();
210
211 // Since abandon is done only at safepoints, we can safely manipulate
212 // these queues.
213 struct AbandonThreadLogClosure : public ThreadClosure {
214 virtual void do_thread(Thread* t) {
215 G1ThreadLocalData::dirty_card_queue(t).reset();
216 }
217 } closure;
218 Threads::threads_do(&closure);
219
220 shared_dirty_card_queue()->reset();
221 }
222
223 void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
224 if (!dcq.is_empty()) {
225 dcq.flush();
226 }
227 }
228
229 void G1DirtyCardQueueSet::concatenate_logs() {
230 // Iterate over all the threads, if we find a partial log add it to
231 // the global list of logs. Temporarily turn off the limit on the number
232 // of outstanding buffers.
233 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
234 size_t old_limit = max_completed_buffers();
235 set_max_completed_buffers(MaxCompletedBuffersUnlimited);
236
237 class ConcatenateThreadLogClosure : public ThreadClosure {
238 G1DirtyCardQueueSet* _qset;
239 public:
240 ConcatenateThreadLogClosure(G1DirtyCardQueueSet* qset) : _qset(qset) {}
241 virtual void do_thread(Thread* t) {
242 _qset->concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
243 }
244 } closure(this);
245 Threads::threads_do(&closure);
246
247 concatenate_log(_shared_dirty_card_queue);
248 set_max_completed_buffers(old_limit);
249 }
|
39 #include "runtime/threadSMR.hpp"
40
41 // Closure used for updating remembered sets and recording references that
42 // point into the collection set while the mutator is running.
43 // Assumed to be only executed concurrently with the mutator. Yields via
44 // SuspendibleThreadSet after every card.
45 class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
46 public:
47 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
48 G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);
49
50 if (SuspendibleThreadSet::should_yield()) {
51 // Caller will actually yield.
52 return false;
53 }
54 // Otherwise, we finished successfully; return true.
55 return true;
56 }
57 };
58
// Construct a dirty card queue belonging to the given qset.
// Dirty card queues are always active, so we create them with their
// active field set to true.
G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  PtrQueue(qset, true /* active */)
{ }
64
G1DirtyCardQueue::~G1DirtyCardQueue() {
  // Push any remaining buffered cards to the qset before the queue goes away.
  flush();
}
68
G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _free_ids(NULL),                 // allocated lazily in initialize() if requested
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  // Dirty card queues are always active: mutators log cards unconditionally.
  _all_active = true;
}
78
G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  // _free_ids is only allocated when initialize() is called with
  // init_free_ids = true; deleting NULL is harmless otherwise.
  delete _free_ids;
}
82
83 // Determines how many mutator threads can process the buffers in parallel.
84 uint G1DirtyCardQueueSet::num_par_ids() {
85 return (uint)os::initial_active_processor_count();
86 }
87
// Set up the queue set: forward the monitor/allocator to the base class and
// optionally allocate the set of free ids used to hand out parallel worker
// ids to mutator threads.
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  if (init_free_ids) {
    // Ids range over [0, num_par_ids()).
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}
96
97 void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
98 G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
99 }
100
101 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
102 BufferNode* node,
103 bool consume,
104 uint worker_i) {
105 if (cl == NULL) return true;
106 bool result = true;
107 void** buf = BufferNode::make_buffer_from_node(node);
108 size_t i = node->index();
109 size_t limit = buffer_size();
110 for ( ; i < limit; ++i) {
111 jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
195 nd = next;
196 } else {
197 nd = actual;
198 }
199 }
200 }
201
202 void G1DirtyCardQueueSet::abandon_logs() {
203 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
204 abandon_completed_buffers();
205
206 // Since abandon is done only at safepoints, we can safely manipulate
207 // these queues.
208 struct AbandonThreadLogClosure : public ThreadClosure {
209 virtual void do_thread(Thread* t) {
210 G1ThreadLocalData::dirty_card_queue(t).reset();
211 }
212 } closure;
213 Threads::threads_do(&closure);
214
215 G1BarrierSet::shared_dirty_card_queue().reset();
216 }
217
218 void G1DirtyCardQueueSet::concatenate_logs() {
219 // Iterate over all the threads, if we find a partial log add it to
220 // the global list of logs. Temporarily turn off the limit on the number
221 // of outstanding buffers.
222 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
223 size_t old_limit = max_completed_buffers();
224 set_max_completed_buffers(MaxCompletedBuffersUnlimited);
225
226 struct ConcatenateThreadLogClosure : public ThreadClosure {
227 virtual void do_thread(Thread* t) {
228 G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
229 if (!dcq.is_empty()) {
230 dcq.flush();
231 }
232 }
233 } closure;
234 Threads::threads_do(&closure);
235
236 G1BarrierSet::shared_dirty_card_queue().flush();
237 set_max_completed_buffers(old_limit);
238 }
|