/*
 * Copyright (c) 2016, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP

#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp"
#include "runtime/mutex.hpp"

class Thread;

template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
{
public:
  typedef OverflowTaskQueue<E, F, N> taskqueue_t;

  BufferedOverflowTaskQueue() : _buf_empty(true) {}

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto:
  //  - first, try the buffer;
  //  - then, try the queue;
  //  - then, the overflow stack.
  // Always returns true.
  inline bool push(E t);

  // Attempt to pop from the buffer; return true if anything was popped.
  inline bool pop_buffer(E &t);

  inline void clear_buffer()       { _buf_empty = true; }
  inline bool buffer_empty() const { return _buf_empty; }
  inline bool is_empty()     const {
    return taskqueue_t::is_empty() && buffer_empty();
  }

private:
  bool _buf_empty;
  E _elem;
};
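
// A minimal usage sketch for BufferedOverflowTaskQueue (illustrative only, not part
// of this interface; do_task() is a hypothetical task processor, and the exact
// pop_local()/pop_overflow() signatures come from the base task queue classes).
// Pushes land in the single-element buffer first; draining checks the buffer, then
// the queue proper, then the overflow stack:
//
//   BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> q;
//   q.push(task);                   // buffered, or spilled to queue/overflow stack
//   ShenandoahMarkTask t;
//   while (!q.is_empty()) {
//     if (q.pop_buffer(t)   ||      // cheapest: the one-element buffer
//         q.pop_local(t)    ||      // then the queue proper
//         q.pop_overflow(t)) {      // then the overflow stack
//       do_task(t);
//     }
//   }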

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// ObjArrayChunkedTask
//
// Encodes both regular oops, and the array oops plus chunking data for parallel array processing.
// The design goal is to make the regular oop ops very fast, because that would be the prevailing
// case. On the other hand, it should not block parallel array processing from efficiently dividing
// the array work.
//
// The idea is to steal the bits from the 64-bit oop to encode array data, if needed. For the
// proper divide-and-conquer strategies, we want to encode the "blocking" data. It turns out, the
// most efficient way to do this is to encode the array block as (chunk * 2^pow), where it is assumed
// that the block has the size of 2^pow. This requires pow to have only 5 bits (2^32) to encode
// all possible arrays.
//
//    |---------oop---------|-pow-|--chunk---|
//    0                    49    54         64
//
// By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1.
//
// This encoding gives a few interesting benefits:
//
// a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task:
//
//    |---------oop---------|00000|0000000000| // no chunk data
//
//    This helps the most ubiquitous path. The initialization amounts to putting the oop into the word
//    with zero padding. Testing for "chunkedness" is testing for zero with the chunk mask.
//
// b) Splitting tasks for divide-and-conquer is possible. Suppose we have chunk <C, P> that covers
//    interval [ (C-1)*2^P; C*2^P ). We can then split it into two chunks:
//      <2*C - 1, P-1>, that covers interval [ (2*C - 2)*2^(P-1); (2*C - 1)*2^(P-1) )
//      <2*C,     P-1>, that covers interval [ (2*C - 1)*2^(P-1); 2*C*2^(P-1) )
//
//    Observe that the union of these two intervals is:
//      [ (2*C - 2)*2^(P-1); 2*C*2^(P-1) )
//
//    ...which is the original interval:
//      [ (C-1)*2^P; C*2^P )
//
// c) The divide-and-conquer strategy could even start with chunk <1, round-log2-len(arr)>, and split
//    down in the parallel threads, which alleviates the upfront (serial) splitting costs.
//
// Encoding limitations caused by current bitscales:
//    10 bits for chunk: max 1024 blocks per array
//     5 bits for power: max 2^32 array
//    49 bits for oop:   max 512 TB of addressable space
//
// Stealing bits from the oop trims down the addressable space. Stealing too few bits for chunk ID
// limits potential parallelism. Stealing too few bits for pow limits the maximum array size that
// can be handled. In the future, these might be rebalanced to favor one degree of freedom against
// another. For example, if/when Arrays 2.0 brings 2^64-sized arrays, we might need to steal another
// bit for power. We could regain some bits if chunks are counted in ObjArrayMarkingStride units.
//
// There is also a fallback version that uses plain fields, when we don't have enough space to steal
// the bits from the native pointer. It is useful to debug the _LP64 version.
//
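// For example, here is a worked instance of the splitting rule in (b) above, with
// illustrative numbers: the chunk <C, P> = <1, 10> covers the interval [0; 1024).
// Splitting it gives:
//   <2*1 - 1, 9> = <1, 9>, covering [0*512; 1*512) = [0; 512)
//   <2*1,     9> = <2, 9>, covering [1*512; 2*512) = [512; 1024)
// The union of the two halves is the original [0; 1024), and each half can be split
// again independently until the chunks are small enough to process directly.
//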
#ifdef _LP64
class ObjArrayChunkedTask
{
public:
  enum {
    chunk_bits  = 10,
    pow_bits    = 5,
    oop_bits    = sizeof(uintptr_t)*8 - chunk_bits - pow_bits,
  };
  enum {
    oop_shift   = 0,
    pow_shift   = oop_shift + oop_bits,
    chunk_shift = pow_shift + pow_bits,
  };

public:
  ObjArrayChunkedTask(oop o = NULL) {
    _obj = ((uintptr_t)(void*) o) << oop_shift;
  }
  ObjArrayChunkedTask(oop o, int chunk, int mult) {
    assert(0 <= chunk && chunk < nth_bit(chunk_bits), err_msg("chunk is sane: %d", chunk));
    assert(0 <= mult && mult < nth_bit(pow_bits), err_msg("pow is sane: %d", mult));
    uintptr_t t_b = ((uintptr_t) chunk) << chunk_shift;
    uintptr_t t_m = ((uintptr_t) mult) << pow_shift;
    uintptr_t obj = (uintptr_t)(void*)o;
    assert(obj < nth_bit(oop_bits), err_msg("obj ref is sane: " PTR_FORMAT, obj));
    uintptr_t t_o = obj << oop_shift;
    _obj = t_o | t_m | t_b;
  }
  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj) { }

  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
    _obj = t._obj;
    return *this;
  }
  volatile ObjArrayChunkedTask&
  operator =(const volatile ObjArrayChunkedTask& t) volatile {
    (void)const_cast<uintptr_t&>(_obj = t._obj);
    return *this;
  }

  inline oop obj()   const { return (oop) reinterpret_cast<void*>((_obj >> oop_shift) & right_n_bits(oop_bits)); }
  inline int chunk() const { return (int) ((_obj >> chunk_shift) & right_n_bits(chunk_bits)); }
  inline int pow()   const { return (int) ((_obj >> pow_shift) & right_n_bits(pow_bits)); }
  inline bool is_not_chunked() const { return (_obj & ~right_n_bits(oop_bits + pow_bits)) == 0; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

  static size_t max_addressable() {
    return nth_bit(oop_bits);
  }

  static int chunk_size() {
    return nth_bit(chunk_bits);
  }

private:
  uintptr_t _obj;
};
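
// A round-trip sketch for the encoding above (illustrative only; the asserts just
// mirror the accessors and are not part of this interface):
//
//   oop obj = ...;                          // some array oop, fits in 49 bits
//   ObjArrayChunkedTask t(obj, 3, 9);       // chunk 3, blocks of size 2^9
//   assert(t.obj() == obj, "oop survives the round trip");
//   assert(t.chunk() == 3, "chunk survives the round trip");
//   assert(t.pow() == 9,   "pow survives the round trip");
//   assert(!t.is_not_chunked(), "chunk != 0 means chunked");
//
//   ObjArrayChunkedTask r(obj);             // regular oop task
//   assert(r.is_not_chunked(), "upper bits are zero");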
#else
class ObjArrayChunkedTask
{
public:
  enum {
    chunk_bits = 10,
    pow_bits   = 5,
  };
public:
  ObjArrayChunkedTask(oop o = NULL, int chunk = 0, int pow = 0): _obj(o) {
    assert(0 <= chunk && chunk < nth_bit(chunk_bits), err_msg("chunk is sane: %d", chunk));
    assert(0 <= pow && pow < nth_bit(pow_bits), err_msg("pow is sane: %d", pow));
    _chunk = chunk;
    _pow = pow;
  }
  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj), _chunk(t._chunk), _pow(t._pow) { }

  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
    _obj = t._obj;
    _chunk = t._chunk;
    _pow = t._pow;
    return *this;
  }
  volatile ObjArrayChunkedTask&
  operator =(const volatile ObjArrayChunkedTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _chunk = t._chunk;
    _pow = t._pow;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int chunk() const { return _chunk; }
  inline int pow()   const { return _pow; }

  inline bool is_not_chunked() const { return _chunk == 0; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

  static size_t max_addressable() {
    return sizeof(oop);
  }

  static int chunk_size() {
    return nth_bit(chunk_bits);
  }

private:
  oop _obj;
  int _chunk;
  int _pow;
};
#endif // _LP64

#ifdef _MSC_VER
#pragma warning(pop) // balances the warning(push) above
#endif

typedef ObjArrayChunkedTask ShenandoahMarkTask;
typedef BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> ShenandoahBufferedOverflowTaskQueue;
typedef Padded<ShenandoahBufferedOverflowTaskQueue> ShenandoahObjToScanQueue;

template <class T, MEMFLAGS F>
class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, F> {
private:
  volatile jint _claimed_index;
  debug_only(uint _reserved; )

public:
  using GenericTaskQueueSet<T, F>::size;

public:
  ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, F>(n), _claimed_index(0) {
    debug_only(_reserved = 0; )
  }

  void clear_claimed() { _claimed_index = 0; }
  T* claim_next();

  // Reserve queues that are not subject to parallel claiming.
  void reserve(uint n) {
    assert(n <= size(), "Sanity");
    _claimed_index = (jint)n;
    debug_only(_reserved = n;)
  }

  debug_only(uint get_reserved() const { return (uint)_reserved; })
};

template <class T, MEMFLAGS F>
T* ParallelClaimableQueueSet<T, F>::claim_next() {
  jint size = (jint)GenericTaskQueueSet<T, F>::size();

  if (_claimed_index >= size) {
    return NULL;
  }

  jint index = Atomic::add(1, &_claimed_index);

  if (index <= size) {
    return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
  } else {
    return NULL;
  }
}

class ShenandoahObjToScanQueueSet: public ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC> {
public:
  ShenandoahObjToScanQueueSet(int n) : ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC>(n) {
  }

  bool is_empty();

  void clear();
};
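
// A minimal sketch of parallel claiming (illustrative only; process_queue() is a
// hypothetical per-queue worker). clear_claimed() runs once before the parallel
// phase; each worker then claims whole queues until claim_next() returns NULL, so
// the queues are distributed among threads without further synchronization:
//
//   ShenandoahObjToScanQueueSet* qset = ...;
//   qset->clear_claimed();                  // once, before the parallel phase
//
//   // in each worker thread:
//   ShenandoahObjToScanQueue* q;
//   while ((q = qset->claim_next()) != NULL) {
//     process_queue(q);
//   }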

/*
 * This is an enhanced implementation of Google's work-stealing
 * protocol, which is described in the paper:
 *   Understanding and improving JVM GC work stealing at the data center scale
 *   (http://dl.acm.org/citation.cfm?id=2926706)
 *
 * Instead of keeping a dedicated spin-master, our implementation lets the spin-master
 * relinquish the role before it goes to sleep/wait, which allows newly arrived threads
 * to compete for the role. The intention of this enhancement is to reduce the
 * spin-master's latency in detecting new tasks for stealing and in detecting the
 * termination condition.
 */

class ShenandoahTaskTerminator: public ParallelTaskTerminator {
private:
  Monitor* _blocker;
  Thread*  _spin_master;

public:
  ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
    ParallelTaskTerminator(n_threads, queue_set), _spin_master(NULL) {
    _blocker = new Monitor(Mutex::leaf, "ShenandoahTaskTerminator", false);
  }

  ~ShenandoahTaskTerminator() {
    assert(_blocker != NULL, "Can not be NULL");
    delete _blocker;
  }

  bool offer_termination(TerminatorTerminator* terminator);

private:
  size_t tasks_in_queue_set() { return _queue_set->tasks(); }

  /*
   * Perform the spin-master task.
   * Returns true if the termination condition is detected, otherwise false.
   */
  bool do_spin_master_work(TerminatorTerminator* terminator);
};

class ShenandoahCancelledTerminatorTerminator : public TerminatorTerminator {
  virtual bool should_exit_termination() {
    return false;
  }
  virtual bool should_force_termination() {
    return true;
  }
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP