/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BufferNodeList.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, true /* active */)
{ }

// Flush any remaining entries into the qset before this queue goes away.
G1DirtyCardQueue::~G1DirtyCardQueue() {
  flush();
}

// Called when this queue's buffer is full.  Hands the buffer to the qset,
// which either processes it immediately (in which case the buffer can be
// reused, so we just reset the index) or enqueues it for the refinement
// threads (in which case we need a fresh buffer).
void G1DirtyCardQueue::handle_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  if (dcqs->process_or_enqueue_completed_buffer(node)) {
    reset(); // Buffer fully processed, reset index.
  } else {
    allocate_buffer(); // Buffer enqueued, get a new one.
  }
}

G1DirtyCardQueueSet::G1DirtyCardQueueSet() :
  PtrQueueSet(),
  _cbl_mon(NULL),                  // Installed later by initialize().
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _num_cards(0),
  // Start with refinement effectively disabled until a real threshold is set.
  _process_cards_threshold(ProcessCardsThresholdNever),
  _process_completed_buffers(false),
  _max_cards(MaxCardsUnlimited),
  _max_cards_padding(0),
  // One refinement id per potential concurrent mutator processor.
  _free_ids(0, num_par_ids()),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  abandon_completed_buffers();
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  // Use the processor count at VM startup; this sizes _free_ids so each
  // concurrently-processing mutator thread can claim a distinct worker id.
  return (uint)os::initial_active_processor_count();
}

// Late initialization: install the buffer allocator (via the base class) and
// the monitor guarding the completed-buffer list.  Expected to be called
// exactly once (the assert catches a second call).
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator) {
  PtrQueueSet::initialize(allocator);
  assert(_cbl_mon == NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
}

// Static callback: forward a zero-index (full buffer) event to the given
// thread's dirty card queue.
void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

// Append a completed buffer to the tail of the global list.  If the pending
// card count has crossed the processing threshold and processing is not
// already signaled, set the flag and wake the threads waiting on _cbl_mon.
void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  MonitorLocker ml(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  // A node's index is its first unprocessed entry, so the number of cards
  // this buffer contributes is the remainder of the buffer.
  _num_cards += buffer_size() - cbn->index();

  if (!process_completed_buffers() &&
      (num_cards() > process_cards_threshold())) {
    set_process_completed_buffers(true);
    ml.notify_all();
  }
  verify_num_cards();
}

// Remove and return the buffer at the head of the list, or NULL if the
// pending card count is at or below stop_at (leaving that many cards
// behind for later processing).
BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (num_cards() <= stop_at) {
    return NULL;
  }

  assert(num_cards() > 0, "invariant");
  assert(_completed_buffers_head != NULL, "invariant");
  assert(_completed_buffers_tail != NULL, "invariant");

  BufferNode* bn = _completed_buffers_head;
  _num_cards -= buffer_size() - bn->index();
  _completed_buffers_head = bn->next();
  if (_completed_buffers_head == NULL) {
    // List is now empty; clear tail and turn off the processing signal.
    assert(num_cards() == 0, "invariant");
    _completed_buffers_tail = NULL;
    set_process_completed_buffers(false);
  }
  verify_num_cards();
  bn->set_next(NULL);
  return bn;
}

#ifdef ASSERT
// Debug-only: walk the completed-buffer list and check that the cached
// _num_cards matches the actual number of unprocessed entries.  All callers
// in this file invoke it with _cbl_mon held, so the walk is stable.
void G1DirtyCardQueueSet::verify_num_cards() const {
  size_t actual = 0;
  BufferNode* cur = _completed_buffers_head;
  while (cur != NULL) {
    actual += buffer_size() - cur->index();
    cur = cur->next();
  }
  assert(actual == _num_cards,
         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
         _num_cards, actual);
}
#endif

// Discard all completed buffers without processing them.  The list is
// detached while holding the lock; the buffers themselves are returned to
// the allocator after the lock has been released.
void G1DirtyCardQueueSet::abandon_completed_buffers() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    buffers_to_delete = _completed_buffers_head;
    _completed_buffers_head = NULL;
    _completed_buffers_tail = NULL;
    _num_cards = 0;
    set_process_completed_buffers(false);
  }
  while (buffers_to_delete != NULL) {
    BufferNode* bn = buffers_to_delete;
    buffers_to_delete = bn->next();
    bn->set_next(NULL);
    deallocate_buffer(bn);
  }
}

// Signal the threads waiting on _cbl_mon if there is enough pending work
// (more cards than the processing threshold).
void G1DirtyCardQueueSet::notify_if_necessary() {
  MonitorLocker ml(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (num_cards() > process_cards_threshold()) {
    set_process_completed_buffers(true);
    ml.notify_all();
  }
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
// NOTE(review): merge_bufferlists below does not itself notify; presumably
// callers follow up with notify_if_necessary() -- confirm at call sites.
// Splice all completed buffers from the redirty queue set onto the tail of
// our list in one lock acquisition, and account for their cards.
void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
  assert(allocator() == src->allocator(), "precondition");
  const G1BufferNodeList from = src->take_all_completed_buffers();
  if (from._head == NULL) return; // Nothing to merge.

  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = from._head;
    _completed_buffers_tail = from._tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well formedness");
    _completed_buffers_tail->set_next(from._head);
    _completed_buffers_tail = from._tail;
  }
  _num_cards += from._entry_count;

  // Head and tail must be both NULL or both non-NULL.
  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
         "Sanity");
  verify_num_cards();
}

// Detach and return the entire completed-buffer list, transferring ownership
// of the buffers (and the card count) to the caller.
G1BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  G1BufferNodeList result(_completed_buffers_head, _completed_buffers_tail, _num_cards);
  _completed_buffers_head = NULL;
  _completed_buffers_tail = NULL;
  _num_cards = 0;
  return result;
}

// Refine the cards in "node" from its index to buffer_size(), stopping early
// if the suspendible thread set requests a yield (e.g. for a safepoint).
// Records the next unprocessed entry back into the node's index.
// Returns true if the buffer was fully processed.
bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node, uint worker_id) {
  G1RemSet* rem_set = G1CollectedHeap::heap()->rem_set();
  size_t size = buffer_size();
  void** buffer = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  assert(i <= size, "invariant");
  for ( ; (i < size) && !SuspendibleThreadSet::should_yield(); ++i) {
    CardTable::CardValue* cp = static_cast<CardTable::CardValue*>(buffer[i]);
    rem_set->refine_card_concurrently(cp, worker_id);
  }
  node->set_index(i); // Remember where to resume if we yielded.
  return i == size;
}

// Debug check that a buffer claimed to be fully processed really had its
// index advanced all the way to the buffer size.  No-op in product builds.
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

// Either process "node" in the current (Java) thread or enqueue it for the
// refinement threads.  Returns true if the buffer was processed (so the
// caller retains ownership and may reuse it), false if it was enqueued.
bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself. We don't lock to access
    // buffer count or padding; it is fine to be imprecise here. The
    // add of padding could overflow, which is treated as unlimited.
    size_t limit = max_cards() + max_cards_padding();
    if ((num_cards() > limit) && (limit >= max_cards())) {
      if (mut_process_buffer(node)) {
        return true;
      }
      // Fall through: partially processed (yielded); enqueue the rest.
    }
  }
  enqueue_completed_buffer(node);
  return false;
}

// Process a buffer in a mutator thread, using a temporarily claimed
// worker id from the free-id set.  Returns true if fully consumed.
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
  bool result = refine_buffer(node, worker_id);
  _free_ids.release_par_id(worker_id); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

// Refinement-thread entry point: take one completed buffer (leaving at
// least stop_at cards behind) and refine it.  Returns false if there was
// no buffer to process; otherwise true, re-enqueueing the buffer if it
// was only partially processed due to a yield request.
bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, size_t stop_at) {
  BufferNode* node = get_completed_buffer(stop_at);
  if (node == NULL) {
    return false; // Didn't get a buffer to process.
  } else if (refine_buffer(node, worker_id)) {
    assert_fully_consumed(node, buffer_size());
    // Done with fully processed buffer.
    deallocate_buffer(node);
    Atomic::inc(&_processed_buffers_rs_thread);
    return true;
  } else {
    // Return partially processed buffer to the queue.
    enqueue_completed_buffer(node);
    return true;
  }
}

// Discard all pending dirty card work: the global completed-buffer list,
// every thread's local queue, and the shared queue.  Safepoint-only.
void G1DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();

  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  struct AbandonThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1ThreadLocalData::dirty_card_queue(t).reset();
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().reset();
}

void G1DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads, if we find a partial log add it to
  // the global list of logs. Temporarily turn off the limit on the number
  // of outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_cards();
  set_max_cards(MaxCardsUnlimited); // Lift the limit so flushes always enqueue.

  struct ConcatenateThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
      if (!dcq.is_empty()) {
        dcq.flush();
      }
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().flush();
  set_max_cards(old_limit); // Restore the mutator processing limit.
}