/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BufferNodeList.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be only executed concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
public:
  bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, true /* active */)
{ }

G1DirtyCardQueue::~G1DirtyCardQueue() {
  flush();
}

void G1DirtyCardQueue::handle_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  if (dcqs->process_or_enqueue_completed_buffer(node)) {
    reset(); // Buffer fully processed, reset index.
  } else {
    allocate_buffer(); // Buffer enqueued, get a new one.
  }
}
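// For orientation, a hedged sketch (not code from this file) of how a
// mutator thread ends up in handle_completed_buffer() above: the G1
// post-write barrier dirties a card and enqueues its address on the
// thread-local queue; when the buffer fills, the PtrQueue machinery
// calls handle_zero_index(), which hands the full buffer to
// handle_completed_buffer(). Roughly:
//
//   G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(thread);
//   dcq.enqueue(card_ptr);  // may fill the buffer and trigger
//                           // handle_completed_buffer() above
//
// The barrier code itself lives in G1BarrierSet, not in this file.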
G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _num_entries_in_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),
  _completed_buffers_padding(0),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  abandon_completed_buffers();
  delete _free_ids;
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(allocator);
  assert(_cbl_mon == NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  if (init_free_ids) {
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}

void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _num_entries_in_completed_buffers += buffer_size() - cbn->index();

  if (!process_completed_buffers() &&
      (num_completed_buffers() > process_completed_buffers_threshold())) {
    set_process_completed_buffers(true);
    if (_notify_when_complete) {
      _cbl_mon->notify_all();
    }
  }
  verify_num_entries_in_completed_buffers();
}

BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (num_completed_buffers() <= stop_at) {
    return NULL;
  }

  assert(num_completed_buffers() > 0, "invariant");
  assert(_completed_buffers_head != NULL, "invariant");
  assert(_completed_buffers_tail != NULL, "invariant");

  BufferNode* bn = _completed_buffers_head;
  _num_entries_in_completed_buffers -= buffer_size() - bn->index();
  _completed_buffers_head = bn->next();
  if (_completed_buffers_head == NULL) {
    assert(num_completed_buffers() == 0, "invariant");
    _completed_buffers_tail = NULL;
    set_process_completed_buffers(false);
  }
  verify_num_entries_in_completed_buffers();
  bn->set_next(NULL);
  return bn;
}

#ifdef ASSERT
void G1DirtyCardQueueSet::verify_num_entries_in_completed_buffers() const {
  size_t actual = 0;
  BufferNode* cur = _completed_buffers_head;
  while (cur != NULL) {
    actual += buffer_size() - cur->index();
    cur = cur->next();
  }
  assert(actual == _num_entries_in_completed_buffers,
         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
         _num_entries_in_completed_buffers, actual);
}
#endif
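// A note on the entry accounting above: a BufferNode's index() is the
// position of the first unprocessed entry, and entries occupy positions
// index() .. buffer_size() - 1, so each node contributes
// buffer_size() - index() entries. For example (illustrative numbers,
// not defaults from this file): with buffer_size() == 256, a freshly
// filled buffer has index() == 0 and contributes 256 entries, while a
// partially processed buffer re-enqueued with index() == 200 contributes
// only the 56 entries that remain.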
void G1DirtyCardQueueSet::abandon_completed_buffers() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    buffers_to_delete = _completed_buffers_head;
    _completed_buffers_head = NULL;
    _completed_buffers_tail = NULL;
    _num_entries_in_completed_buffers = 0;
    set_process_completed_buffers(false);
  }
  while (buffers_to_delete != NULL) {
    BufferNode* bn = buffers_to_delete;
    buffers_to_delete = bn->next();
    bn->set_next(NULL);
    deallocate_buffer(bn);
  }
}

void G1DirtyCardQueueSet::notify_if_necessary() {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (num_completed_buffers() > process_completed_buffers_threshold()) {
    set_process_completed_buffers(true);
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
  assert(allocator() == src->allocator(), "precondition");
  const G1BufferNodeList from = src->take_all_completed_buffers();
  if (from._head == NULL) return;

  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = from._head;
    _completed_buffers_tail = from._tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    _completed_buffers_tail->set_next(from._head);
    _completed_buffers_tail = from._tail;
  }
  _num_entries_in_completed_buffers += from._entry_count;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
  verify_num_entries_in_completed_buffers();
}

bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                                  BufferNode* node,
                                                  uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false; // Incomplete processing.
      break;
    }
  }
  assert(i <= buffer_size(), "invariant");
  node->set_index(i);
  return result;
}

#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT
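// A worked example of the limit check in process_or_enqueue_completed_buffer()
// below (the numbers are illustrative, not defaults): with
// max_completed_buffers() == 100 and completed_buffers_padding() == 20,
// limit == 120, so a Java thread only refines a buffer itself once more
// than 120 completed buffers are pending. If the addition overflows,
// limit wraps around and becomes smaller than max_buffers, the
// (limit >= max_buffers) guard fails, and the thread always enqueues,
// i.e. overflow is treated as "unlimited".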
bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself. We don't lock to access
    // buffer count or padding; it is fine to be imprecise here. The
    // add of padding could overflow, which is treated as unlimited.
    size_t max_buffers = max_completed_buffers();
    size_t limit = max_buffers + completed_buffers_padding();
    if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
      if (mut_process_buffer(node)) {
        return true;
      }
    }
  }
  enqueue_completed_buffer(node);
  return false;
}

bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                                            uint worker_i,
                                                            size_t stop_at,
                                                            bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_completed_buffer(nd);
    }
    return true;
  }
}

void G1DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();

  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  struct AbandonThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1ThreadLocalData::dirty_card_queue(t).reset();
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().reset();
}

void G1DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs. Temporarily turn off the limit on the number
  // of outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_completed_buffers();
  set_max_completed_buffers(MaxCompletedBuffersUnlimited);

  struct ConcatenateThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
      if (!dcq.is_empty()) {
        dcq.flush();
      }
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().flush();
  set_max_completed_buffers(old_limit);
}
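// How the operations above are expected to fit together at a pause (an
// assumption based on the comments in this file, not a definitive call
// sequence; the actual call sites are elsewhere in G1):
//
//   dcqs.concatenate_logs();  // at a safepoint: flush every thread-local
//                             // queue into the completed-buffer list so
//                             // the pause can process all pending cards
//   dcqs.abandon_logs();      // at a safepoint: discard all pending
//                             // buffers and reset the per-thread queues,
//                             // e.g. when their contents are obsolete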