 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be executed only concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

DirtyCardQueue::~DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  _all_active = true;
}

DirtyCardQueueSet::~DirtyCardQueueSet() {
  delete _free_ids;
}

// Determines how many mutator threads can process the buffers in parallel.
uint DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                   BufferNode::Allocator* allocator,
                                   Mutex* lock,
                                   bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

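// Apply "cl" to the cards in the buffer attached to "node", starting at the
// buffer's current index. Returns false if the closure stopped processing
// early. If "consume" is true, the node's index is advanced past the cards
// that were processed.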
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                BufferNode* node,
                                                bool consume,
                                                uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false; // Incomplete processing.
      break;
    }
  }
  if (consume) {
    assert(i <= buffer_size(), "invariant");
    node->set_index(i);
  }
  return result;
}

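// Verification helper: in debug builds, check that a buffer claimed to be
// fully consumed really had its index advanced to the full buffer size.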
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

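// Process a full buffer in the context of a mutator thread: claim a parallel
// worker id from the free id set, refine the cards with the concurrent
// refinement closure, then release the id. Only fully consumed buffers count
// toward _processed_buffers_mut.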
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

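// Refine one buffer taken from the completed-buffer list, respecting the
// stop_at threshold. Returns true if a buffer was claimed and processed.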
bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

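// Apply the given closure to completed buffers during a GC pause. Every
// buffer must be fully consumed; stopping early is not allowed here.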
bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

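// Take one buffer from the completed-buffer list (respecting stop_at) and
// apply the closure to it. Fully processed buffers are deallocated;
// partially processed ones are returned to the list, which must not happen
// during a pause. Returns false if no buffer was available.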
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          uint worker_i,
                                                          size_t stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_completed_buffer(nd);
    }
    return true;
  }
}

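// Worker threads claim buffers from the list headed by _cur_par_buffer_node
// by advancing that pointer with a CAS; each claimed buffer is processed
// without being consumed, so the buffers remain intact.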
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      bool b = apply_closure_to_buffer(cl, nd, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      nd = actual;
    }
  }
}

void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    G1ThreadLocalData::dirty_card_queue(t).reset();
  }
  shared_dirty_card_queue()->reset();
}

void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
  if (!dcq.is_empty()) {
    dcq.flush();
  }
}

void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to the
  // global list of logs. Temporarily turn off the limit on the number of
  // outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_completed_buffers();
  set_max_completed_buffers(MaxCompletedBuffersUnlimited);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
  }
  concatenate_log(_shared_dirty_card_queue);
  set_max_completed_buffers(old_limit);
}
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be executed only concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

G1DirtyCardQueue::~G1DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  delete _free_ids;
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     Mutex* lock,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}

void G1DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

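// Apply "cl" to the cards in the buffer attached to "node", starting at the
// buffer's current index. Returns false if the closure stopped processing
// early. If "consume" is true, the node's index is advanced past the cards
// that were processed.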
bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                                  BufferNode* node,
                                                  bool consume,
                                                  uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false; // Incomplete processing.
      break;
    }
  }
  if (consume) {
    assert(i <= buffer_size(), "invariant");
    node->set_index(i);
  }
  return result;
}

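// Verification helper: in debug builds, check that a buffer claimed to be
// fully consumed really had its index advanced to the full buffer size.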
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

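// Process a full buffer in the context of a mutator thread: claim a parallel
// worker id from the free id set, refine the cards with the concurrent
// refinement closure, then release the id. Only fully consumed buffers count
// toward _processed_buffers_mut.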
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

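// Refine one buffer taken from the completed-buffer list, respecting the
// stop_at threshold. Returns true if a buffer was claimed and processed.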
bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

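// Apply the given closure to completed buffers during a GC pause. Every
// buffer must be fully consumed; stopping early is not allowed here.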
bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

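// Take one buffer from the completed-buffer list (respecting stop_at) and
// apply the closure to it. Fully processed buffers are deallocated;
// partially processed ones are returned to the list, which must not happen
// during a pause. Returns false if no buffer was available.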
bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                                            uint worker_i,
                                                            size_t stop_at,
                                                            bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_completed_buffer(nd);
    }
    return true;
  }
}

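// Worker threads claim buffers from the list headed by _cur_par_buffer_node
// by advancing that pointer with a CAS; each claimed buffer is processed
// without being consumed, so the buffers remain intact.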
void G1DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      bool b = apply_closure_to_buffer(cl, nd, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      nd = actual;
    }
  }
}

void G1DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    G1ThreadLocalData::dirty_card_queue(t).reset();
  }
  shared_dirty_card_queue()->reset();
}

void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
  if (!dcq.is_empty()) {
    dcq.flush();
  }
}

void G1DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to the
  // global list of logs. Temporarily turn off the limit on the number of
  // outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_completed_buffers();
  set_max_completed_buffers(MaxCompletedBuffersUnlimited);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
  }
  concatenate_log(_shared_dirty_card_queue);
  set_max_completed_buffers(old_limit);
}