341 inline bool pop_overflow(E& t);
342
343 inline overflow_t* overflow_stack() { return &_overflow_stack; }
344
345 inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
346 inline bool overflow_empty() const { return _overflow_stack.is_empty(); }
347 inline bool is_empty() const {
348 return taskqueue_empty() && overflow_empty();
349 }
350
351 private:
352 overflow_t _overflow_stack;
353 };
354
// Abstract base for a set of task queues. Holds the shared pseudo-random
// helper used when picking steal victims, plus the pure-virtual peek().
class TaskQueueSetSuper {
protected:
  // Presumably a Park-Miller ("minimal standard") PRNG stepping *seed0 in
  // place; used by subclasses to choose steal victims — definition not
  // visible here, confirm at the out-of-line implementation.
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};
362
// Glue class: combines TaskQueueSetSuper with CHeapObj<F> so concrete queue
// sets are C-heap allocated and accounted under memory flag F.
template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};
365
// A fixed-size set of task queues of type T (typically one queue per worker
// thread) supporting work stealing between the queues. Allocated on the
// C heap under memory flag F via TaskQueueSetSuperImpl.
template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;      // Number of queues in the set.
  T** _queues;  // Array of _n queue pointers; populated via register_queue().

public:
  typedef typename T::element_type E;  // Element type held by the queues.

  GenericTaskQueueSet(int n);
  ~GenericTaskQueueSet();

  // One steal attempt on behalf of "queue_num": presumably tries two
  // randomly chosen victim queues (driven by "seed") and takes from the
  // better one — TODO confirm against the out-of-line definition, which is
  // not visible in this chunk.
  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  // Install queue "q" at slot "i"; asserts i < _n.
  void register_queue(uint i, T* q);

  // The queue registered at slot "n" (no bounds check).
  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue. (It may try
  // several queues, according to some configuration parameter.) If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  // True if any queue in the set contains a task.
  bool peek();

  // Number of queues in the set.
  uint size() const { return _n; }
};
395
396 template<class T, MEMFLAGS F> void
397 GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
398 assert(i < _n, "index out of range.");
399 _queues[i] = q;
400 }
401
402 template<class T, MEMFLAGS F> T*
403 GenericTaskQueueSet<T, F>::queue(uint i) {
404 return _queues[i];
405 }
406
407 template<class T, MEMFLAGS F>
408 bool GenericTaskQueueSet<T, F>::peek() {
409 // Try all the queues.
410 for (uint j = 0; j < _n; j++) {
411 if (_queues[j]->peek())
412 return true;
413 }
414 return false;
415 }
416
// When to terminate from the termination protocol: a callback consulted by
// ParallelTaskTerminator::offer_termination — when should_exit_termination()
// answers true, the offering thread leaves the protocol early.
// NOTE(review): polymorphic base without a virtual destructor — deleting a
// derived instance through this type would be undefined; confirm instances
// are never destroyed that way.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};
422
// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

// TRACESPINNING gates statistics counters for the termination protocol
// (yields/spins/peeks); forcibly disabled here.
#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  uint _n_threads;                // Number of threads participating in termination.
  TaskQueueSetSuper* _queue_set;  // Queue set inspected for remaining work.

  // Padding on both sides of _offered_termination — presumably to keep this
  // contended counter on its own cache line (avoid false sharing).
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
  // Presumably the count of threads that have currently offered to
  // terminate; volatile because it is read/written by multiple threads.
  volatile uint _offered_termination;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  // True if any queue in _queue_set still holds a task.
  bool peek_in_queue_set();
protected:
  // Give up the processor while waiting for peers to offer termination.
  virtual void yield();
  // Block for "millis" milliseconds while waiting.
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated. "queue_set" is a
  // queue sets of work queues of other threads.
  ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is. If returns "true", all threads are terminated. If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true. If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(uint n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
483
// Concrete instantiations used by the garbage collector: queues of oops,
// accounted under the mtGC memory flag.
typedef GenericTaskQueue<oop, mtGC> OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
|
341 inline bool pop_overflow(E& t);
342
343 inline overflow_t* overflow_stack() { return &_overflow_stack; }
344
345 inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
346 inline bool overflow_empty() const { return _overflow_stack.is_empty(); }
347 inline bool is_empty() const {
348 return taskqueue_empty() && overflow_empty();
349 }
350
351 private:
352 overflow_t _overflow_stack;
353 };
354
// Abstract base for a set of task queues. Holds the shared pseudo-random
// helper used when picking steal victims, plus the pure-virtual queries.
class TaskQueueSetSuper {
protected:
  // Presumably a Park-Miller ("minimal standard") PRNG stepping *seed0 in
  // place; used by subclasses to choose steal victims — definition not
  // visible here, confirm at the out-of-line implementation.
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
  // Total number of tasks across all queues of the set.
  virtual size_t tasks() = 0;
};
363
// Glue class: combines TaskQueueSetSuper with CHeapObj<F> so concrete queue
// sets are C-heap allocated and accounted under memory flag F.
template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};
366
// A fixed-size set of task queues of type T (typically one queue per worker
// thread) supporting work stealing between the queues. Allocated on the
// C heap under memory flag F via TaskQueueSetSuperImpl.
template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;      // Number of queues in the set.
  T** _queues;  // Array of _n queue pointers; populated via register_queue().

public:
  typedef typename T::element_type E;  // Element type held by the queues.

  GenericTaskQueueSet(int n);
  ~GenericTaskQueueSet();

  // One steal attempt on behalf of "queue_num": presumably tries two
  // randomly chosen victim queues (driven by "seed") and takes from the
  // better one — TODO confirm against the out-of-line definition, which is
  // not visible in this chunk.
  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  // Install queue "q" at slot "i"; asserts i < _n.
  void register_queue(uint i, T* q);

  // The queue registered at slot "n" (no bounds check).
  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue. (It may try
  // several queues, according to some configuration parameter.) If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  // True if any queue in the set contains a task.
  bool peek();
  // Sum of the sizes of all queues in the set.
  size_t tasks();

  // Number of queues in the set.
  uint size() const { return _n; }
};
397
398 template<class T, MEMFLAGS F> void
399 GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
400 assert(i < _n, "index out of range.");
401 _queues[i] = q;
402 }
403
404 template<class T, MEMFLAGS F> T*
405 GenericTaskQueueSet<T, F>::queue(uint i) {
406 return _queues[i];
407 }
408
409 template<class T, MEMFLAGS F>
410 bool GenericTaskQueueSet<T, F>::peek() {
411 // Try all the queues.
412 for (uint j = 0; j < _n; j++) {
413 if (_queues[j]->peek())
414 return true;
415 }
416 return false;
417 }
418
419 template<class T, MEMFLAGS F>
420 size_t GenericTaskQueueSet<T, F>::tasks() {
421 size_t n = 0;
422 for (uint j = 0; j < _n; j++) {
423 n += _queues[j]->size();
424 }
425 return n;
426 }
427
428
// When to terminate from the termination protocol: a callback consulted by
// ParallelTaskTerminator::offer_termination — when should_exit_termination()
// answers true, the offering thread leaves the protocol early.
// NOTE(review): polymorphic base without a virtual destructor — deleting a
// derived instance through this type would be undefined; confirm instances
// are never destroyed that way.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};
434
435 // A class to aid in the termination of a set of parallel tasks using
436 // TaskQueueSet's for work stealing.
437
438 #undef TRACESPINNING
439
440 class ParallelTaskTerminator: public StackObj {
441 protected:
442 uint _n_threads;
443 TaskQueueSetSuper* _queue_set;
444
445 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
446 volatile uint _offered_termination;
447 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
448
449 #ifdef TRACESPINNING
450 static uint _total_yields;
451 static uint _total_spins;
452 static uint _total_peeks;
453 #endif
454
455 bool peek_in_queue_set();
456 protected:
457 virtual void yield();
458 void sleep(uint millis);
459
460 public:
461
462 // "n_threads" is the number of threads to be terminated. "queue_set" is a
463 // queue sets of work queues of other threads.
464 ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
465
466 // The current thread has no work, and is ready to terminate if everyone
467 // else is. If returns "true", all threads are terminated. If returns
468 // "false", available work has been observed in one of the task queues,
469 // so the global task is not complete.
470 bool offer_termination() {
471 return offer_termination(NULL);
472 }
473
474 // As above, but it also terminates if the should_exit_termination()
475 // method of the terminator parameter returns true. If terminator is
476 // NULL, then it is ignored.
477 virtual bool offer_termination(TerminatorTerminator* terminator);
478
479 // Reset the terminator, so that it may be reused again.
480 // The caller is responsible for ensuring that this is done
481 // in an MT-safe manner, once the previous round of use of
482 // the terminator is finished.
483 void reset_for_reuse();
484 // Same as above but the number of parallel threads is set to the
485 // given number.
486 void reset_for_reuse(uint n_threads);
487
488 #ifdef TRACESPINNING
489 static uint total_yields() { return _total_yields; }
490 static uint total_spins() { return _total_spins; }
491 static uint total_peeks() { return _total_peeks; }
492 static void print_termination_counts();
493 #endif
494 };
495
// Concrete instantiations used by the garbage collector: queues of oops,
// accounted under the mtGC memory flag.
typedef GenericTaskQueue<oop, mtGC> OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
|