/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be only executed concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

// Represents a set of free small integer ids.
class FreeIdSet : public CHeapObj<mtGC> {
  enum {
    end_of_list = UINT_MAX,
    claimed = UINT_MAX - 1
  };

  uint _size;
  Monitor* _mon;

  uint* _ids;
  uint _hd;
  uint _waiters;
  uint _claimed;

public:
  FreeIdSet(uint size, Monitor* mon);
  ~FreeIdSet();

  // Returns an unclaimed parallel id (waiting for one to be released if
  // necessary).
  uint claim_par_id();

  void release_par_id(uint id);
};

FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
  _size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
{
  guarantee(size != 0, "must be");
  _ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
  for (uint i = 0; i < size - 1; i++) {
    _ids[i] = i + 1;
  }
  _ids[size - 1] = end_of_list; // end of list.
}

FreeIdSet::~FreeIdSet() {
  FREE_C_HEAP_ARRAY(uint, _ids);
}

uint FreeIdSet::claim_par_id() {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  while (_hd == end_of_list) {
    _waiters++;
    _mon->wait(Mutex::_no_safepoint_check_flag);
    _waiters--;
  }
  uint res = _hd;
  _hd = _ids[res];
  _ids[res] = claimed; // For debugging.
  _claimed++;
  return res;
}

void FreeIdSet::release_par_id(uint id) {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  assert(_ids[id] == claimed, "Precondition.");
  _ids[id] = _hd;
  _hd = id;
  _claimed--;
  if (_waiters > 0) {
    _mon->notify_all();
  }
}
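// Illustrative sketch (not code in this file): a FreeIdSet is used by
// temporarily claiming a worker id around one unit of refinement work and
// releasing it afterwards, e.g.
//
//   uint worker_i = _free_ids->claim_par_id();
//   // ... refine one buffer under worker_i ...
//   _free_ids->release_par_id(worker_i);
//
// mut_process_buffer() below follows this claim/release pattern.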

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

DirtyCardQueue::~DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
uint DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                   BufferNode::Allocator* allocator,
                                   Mutex* lock,
                                   bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new FreeIdSet(num_par_ids(), cbl_mon);
  }
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}
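// Apply the closure to the cards in the buffer from node->index() up to
// buffer_size(). If consume is true, the node's index is updated to record
// how far processing got, so a partially processed buffer can be resumed
// later. Returns true if the buffer was fully processed, false if the
// closure requested an early stop.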
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                BufferNode* node,
                                                bool consume,
                                                uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false; // Incomplete processing.
      break;
    }
  }
  if (consume) {
    assert(i <= buffer_size(), "invariant");
    node->set_index(i);
  }
  return result;
}

#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          uint worker_i,
                                                          size_t stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_completed_buffer(nd);
    }
    return true;
  }
}
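// Parallel workers race to claim buffers from the list headed by
// _cur_par_buffer_node: each worker attempts to advance the shared cursor
// past the node it observed with a CAS, and only the worker whose CAS
// succeeds processes that node. Buffers are not consumed here (consume is
// false), so the nodes themselves stay on the list.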
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      bool b = apply_closure_to_buffer(cl, nd, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      nd = actual;
    }
  }
}

void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    G1ThreadLocalData::dirty_card_queue(t).reset();
  }
  shared_dirty_card_queue()->reset();
}

void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
  if (!dcq.is_empty()) {
    dcq.flush();
  }
}

void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs. Temporarily turn off the limit on the number
  // of outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_completed_buffers();
  set_max_completed_buffers(MaxCompletedBuffersUnlimited);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
  }
  concatenate_log(_shared_dirty_card_queue);
  set_max_completed_buffers(old_limit);
}