/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}

void PtrQueue::enqueue_known_active(void* ptr) {
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(node);
  // We must relock only because the caller will unlock, for the normal
  // case.
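  // (Relocking below intentionally skips the safepoint check; the queue locks
  // in this file are always taken without safepoint checks, cf. the
  // Mutex::_no_safepoint_check_flag uses further down.)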
  _lock->lock_without_safepoint_check();
}

BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _buffer_size(0),
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm is currently done by
  // doing nothing here.
}

void PtrQueueSet::initialize(Monitor* cbl_mon,
                             Mutex* fl_lock,
                             int process_completed_threshold,
                             int max_completed_queue,
                             PtrQueueSet* fl_owner) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _fl_lock = fl_lock;
  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
}

void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(buffer_size());
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}

void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQueue may be the shared dirty card queue, and
      // may be manipulated by more than one worker thread during a pause.
      // Since enqueueing the completed buffer unlocks the
      // Shared_DirtyCardQ_lock, more than one worker thread can 'race' on
      // reading the shared queue attributes (_buf and _index), and multiple
      // threads can call into this routine for the same buffer. That would
      // cause the completed buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in a
      // local and clearing the field while holding _lock. When _lock is
      // released (while enqueueing the completed buffer), a thread that
      // subsequently acquires _lock will skip this code, preventing a
      // duplicate enqueue, and will install a newly allocated buffer below.

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL;                            // clear shared _buf field

      locking_enqueue_completed_buffer(node); // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens, we just return so that the current thread
      // doesn't overwrite the buffer allocated by the other thread and
      // potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
  _buffer_size = sz;
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet* src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}
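
// BufferNode::allocate and BufferNode::deallocate above rely on a single-block
// layout: one C-heap allocation holds the BufferNode header immediately
// followed by its buffer of void* slots, and make_buffer_from_node /
// make_node_from_buffer (defined in ptrQueue.hpp) convert between the two by
// fixed offset arithmetic. The sketch below is a minimal, self-contained
// illustration of that layout technique; DemoNode and its helpers are
// hypothetical names, and plain malloc/free stand in for NEW_C_HEAP_ARRAY /
// FREE_C_HEAP_ARRAY. It is illustrative only and not part of this file's code.
//
//   #include <cstddef>
//   #include <cstdlib>
//   #include <new>
//
//   struct DemoNode {
//     DemoNode* _next;
//     size_t _index;
//     DemoNode() : _next(NULL), _index(0) {}
//
//     // The buffer storage begins immediately after the node header.
//     static size_t buffer_offset() { return sizeof(DemoNode); }
//
//     // Allocate header and buffer in one block; placement-new the header.
//     static DemoNode* allocate(size_t capacity) {
//       void* mem = ::malloc(buffer_offset() + capacity * sizeof(void*));
//       return new (mem) DemoNode();
//     }
//
//     // Destroy the header and release the whole block.
//     static void deallocate(DemoNode* node) {
//       node->~DemoNode();
//       ::free(node);
//     }
//
//     // Convert between the node header and its trailing buffer.
//     static void** buffer_from_node(DemoNode* node) {
//       return reinterpret_cast<void**>(reinterpret_cast<char*>(node) + buffer_offset());
//     }
//     static DemoNode* node_from_buffer(void** buf) {
//       return reinterpret_cast<DemoNode*>(reinterpret_cast<char*>(buf) - buffer_offset());
//     }
//   };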