
src/hotspot/share/gc/shared/taskTerminator.hpp

rev 60302 : [mq]: 8245721-lkorinth-review

*** 1,7 ****
  /*
!  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
--- 1,7 ----
  /*
!  * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
   * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
*** 24,38 ****
   */
  
  #ifndef SHARE_GC_SHARED_TASKTERMINATOR_HPP
  #define SHARE_GC_SHARED_TASKTERMINATOR_HPP
  
  #include "memory/allocation.hpp"
  #include "runtime/mutex.hpp"
- #include "runtime/thread.hpp"
  
  class TaskQueueSetSuper;
  class TerminatorTerminator;
  
  /*
   * Provides a task termination protocol.
   *
   * This is an enhanced implementation of Google's OWST work stealing task termination
--- 24,39 ----
   */
  
  #ifndef SHARE_GC_SHARED_TASKTERMINATOR_HPP
  #define SHARE_GC_SHARED_TASKTERMINATOR_HPP
  
  #include "memory/allocation.hpp"
+ #include "memory/padded.hpp"
  #include "runtime/mutex.hpp"
  
  class TaskQueueSetSuper;
  class TerminatorTerminator;
+ class Thread;
  
  /*
   * Provides a task termination protocol.
   *
   * This is an enhanced implementation of Google's OWST work stealing task termination
*** 48,79 ****
   * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
   * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
   * for stealing and termination condition.
   */
  class TaskTerminator : public CHeapObj<mtGC> {
    uint _n_threads;
    TaskQueueSetSuper* _queue_set;
  
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
    volatile uint _offered_termination;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
  
    void assert_queue_set_empty() const NOT_DEBUG_RETURN;
  
!   void yield();
! 
!   Monitor* _blocker;
    Thread* _spin_master;
  
    // If we should exit current termination protocol
    bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
  
    size_t tasks_in_queue_set() const;
  
!   // Perform spin-master task.
!   // Return true if termination condition is detected, otherwise return false
!   bool do_spin_master_work(TerminatorTerminator* terminator);
  
    NONCOPYABLE(TaskTerminator);
  
  public:
    TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
--- 49,92 ----
   * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
   * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
   * for stealing and termination condition.
   */
  class TaskTerminator : public CHeapObj<mtGC> {
+   struct DelayContext {
+     uint _yield_count;
+     // Number of hard spin loops done since last yield
+     uint _hard_spin_count;
+     // Number of iterations in the hard spin loop.
+     uint _hard_spin_limit;
+ 
+     DelayContext();
+   };
+ 
    uint _n_threads;
    TaskQueueSetSuper* _queue_set;
  
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
    volatile uint _offered_termination;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
  
    void assert_queue_set_empty() const NOT_DEBUG_RETURN;
  
!   Monitor _blocker;
    Thread* _spin_master;
  
+   // Prepare for return from offer_termination. Gives up the spin master token
+   // and wakes up up to tasks threads waiting on _blocker (the default value
+   // means to wake up everyone).
+   void prepare_for_return(Thread* this_thread, size_t tasks = SIZE_MAX);
+ 
    // If we should exit current termination protocol
    bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
  
    size_t tasks_in_queue_set() const;
  
!   // Perform one iteration of spin-master work.
!   bool do_delay_step(DelayContext& delay_context);
  
    NONCOPYABLE(TaskTerminator);
  
  public:
    TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
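
The replacement of do_spin_master_work with do_delay_step(DelayContext&) turns the spin-master's delay policy into an explicit per-invocation state machine: each call performs one step of an escalating backoff (hard spin rounds of growing length, then yields, then a real wait on _blocker). The header alone does not pin down the policy, so the following standalone C++ sketch only illustrates one plausible stepping scheme; the tuning constants, the std::this_thread::yield() stand-in for os::naked_yield(), and the return convention (true = keep polling, false = go wait on the monitor) are assumptions for illustration, not the HotSpot implementation.

#include <cstdint>
#include <thread>

// Hypothetical tuning constants; the real values live in taskTerminator.cpp.
static const uint32_t kSpinsPerYield    = 2;     // hard-spin rounds between yields
static const uint32_t kInitialSpinLimit = 4096;  // iterations of the first hard spin
static const uint32_t kMaxSpinLimit     = 8192;  // cap on hard-spin iterations
static const uint32_t kMaxYields        = 5;     // yields before giving up and sleeping

struct DelayContext {
  uint32_t _yield_count;      // yields done so far
  uint32_t _hard_spin_count;  // hard spin rounds since the last yield
  uint32_t _hard_spin_limit;  // iterations in the next hard spin round

  DelayContext()
    : _yield_count(0), _hard_spin_count(0), _hard_spin_limit(kInitialSpinLimit) {}
};

// One backoff step: hard-spin first (doubling the spin length each round, up
// to a cap), then yield the CPU, and finally tell the caller to stop polling.
// Returns true while the caller should keep checking for work and termination;
// false once it should block on the monitor (_blocker in TaskTerminator).
static bool do_delay_step(DelayContext& ctx) {
  if (ctx._hard_spin_count < kSpinsPerYield) {
    ctx._hard_spin_count++;
    volatile uint32_t sink = 0;              // keeps the busy loop from being optimized away
    for (uint32_t i = 0; i < ctx._hard_spin_limit; i++) {
      sink = sink + 1;                       // stands in for HotSpot's SpinPause()
    }
    if (ctx._hard_spin_limit < kMaxSpinLimit) {
      ctx._hard_spin_limit *= 2;             // exponential backoff
    }
    return true;
  }
  if (ctx._yield_count < kMaxYields) {
    ctx._yield_count++;
    ctx._hard_spin_count = 0;                // restart spin rounds after a yield
    std::this_thread::yield();               // os::naked_yield() in HotSpot proper
    return true;
  }
  return false;                              // delay budget exhausted
}

int main() {
  DelayContext ctx;
  while (do_delay_step(ctx)) {
    // A spin-master would re-check _offered_termination and the task queues here.
  }
  // At this point the real code would wait on _blocker instead of returning.
  return 0;
}

One apparent benefit of the new shape: because the counters live in a DelayContext owned by the caller, every offer_termination round starts from a fresh backoff state, and the stepping logic can be unit-reasoned about separately from the monitor protocol.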