/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP

#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

class ConcurrentMarkSweepGeneration;
class CMSCollector;

// The Concurrent Mark Sweep GC Thread (could be several in the future).
class ConcurrentMarkSweepThread: public ConcurrentGCThread {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
  friend class CMSCollector;
 public:
  virtual void run();

 private:
  static ConcurrentMarkSweepThread*     _cmst;
  static CMSCollector*                  _collector;
  static SurrogateLockerThread*         _slt;
  static SurrogateLockerThread::SLT_msg_type _sltBuffer;
  static Monitor*                       _sltMonitor;

  ConcurrentMarkSweepThread*            _next;

  static bool _should_terminate;

  enum CMS_flag_type {
    CMS_nil             = NoBits,
    CMS_cms_wants_token = nth_bit(0),
    CMS_cms_has_token   = nth_bit(1),
    CMS_vm_wants_token  = nth_bit(2),
    CMS_vm_has_token    = nth_bit(3)
  };

  static int _CMS_flag;

  static bool CMS_flag_is_set(int b)        { return (_CMS_flag & b) != 0;   }
  static bool set_CMS_flag(int b)           { return (_CMS_flag |= b) != 0;  }
  static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
  void sleepBeforeNextCycle();

  // CMS thread should yield for a young gen collection, direct allocation,
  // and iCMS activity.
  static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
  static volatile jint _pending_yields;
  static volatile jint _pending_decrements; // decrements to _pending_yields
  static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing

  // Tracing messages, enabled by CMSTraceThreadState.
  static inline void trace_state(const char* desc);

  static volatile bool _icms_enabled;   // iCMS enabled?
  static volatile bool _should_run;     // iCMS may run
  static volatile bool _should_stop;    // iCMS should stop

  // debugging
  void verify_ok_to_terminate() const PRODUCT_RETURN;

 public:
  // Constructor
  ConcurrentMarkSweepThread(CMSCollector* collector);

  static void makeSurrogateLockerThread(TRAPS);
  static SurrogateLockerThread* slt() { return _slt; }

  // Tester
  bool is_ConcurrentGC_thread() const { return true;       }

  static void threads_do(ThreadClosure* tc);

  // Printing
  void print_on(outputStream* st) const;
  void print() const                                  { print_on(tty); }
  static void print_all_on(outputStream* st);
  static void print_all()                             { print_all_on(tty); }

  // Returns the CMS Thread
  static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
  static CMSCollector*         collector()    { return _collector;  }

  // Create and start the CMS Thread, or stop it on shutdown
  static ConcurrentMarkSweepThread* start(CMSCollector* collector);
  static void stop();
  static bool should_terminate() { return _should_terminate; }

  // Synchronization using CMS token
  static void synchronize(bool is_cms_thread);
  static void desynchronize(bool is_cms_thread);
  static bool vm_thread_has_cms_token() {
    return CMS_flag_is_set(CMS_vm_has_token);
  }
  static bool cms_thread_has_cms_token() {
    return CMS_flag_is_set(CMS_cms_has_token);
  }
  static bool vm_thread_wants_cms_token() {
    return CMS_flag_is_set(CMS_vm_wants_token);
  }
  static bool cms_thread_wants_cms_token() {
    return CMS_flag_is_set(CMS_cms_wants_token);
  }

  // Wait on CMS lock until the next synchronous GC
  // or given timeout, whichever is earlier.
  void    wait_on_cms_lock(long t); // milliseconds

  // The CMS thread will yield during the work portion of its cycle
  // only when requested to.  Both synchronous and asynchronous requests
  // are provided:
  // (1) A synchronous request is used for young gen collections and
  //     for direct allocations.  The requesting thread increments
  //     _pending_yields at the beginning of an operation, and decrements
  //     _pending_yields when that operation is completed.
  //     In turn, the CMS thread yields when _pending_yields is positive,
  //     and continues to yield until the value reverts to 0.
  // (2) An asynchronous request, on the other hand, is used by iCMS
  //     for the stop_icms() operation. A single yield satisfies all of
  //     the outstanding async yield requests, of which there may
  //     occasionally be several in close succession. To accomplish
  //     this, an async-requesting thread atomically increments both
  //     _pending_yields and _pending_decrements. Unlike the synchronous
  //     case described above, an async-requesting thread does not wait to
  //     "acknowledge" completion of the operation and deregister the
  //     request. Instead, after yielding, the CMS thread decrements both
  //     _pending_yields and _pending_decrements by the value seen in
  //     _pending_decrements before the decrement.
  // NOTE: The above scheme is isomorphic to having two request counters,
  // one for async requests and one for sync requests, with the CMS thread
  // checking the sum of the two counters to decide whether it should yield
  // and clearing only the async counter when it yields. However, it is more
  // efficient for CMS code to check a single counter, _pending_yields, that
  // holds the sum of both sync and async requests, together with a second
  // counter, _pending_decrements, that holds only the async requests,
  // because in a typical CMS run there are many more potential (i.e. static)
  // yield points than there are actual (i.e. dynamic) yields triggered by
  // requests, which are few and far between.
  //
  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
  // we cannot easily test that invariant: the counters are manipulated via
  // atomic instructions without explicit locking, and we cannot read the two
  // counters atomically together. One suggestion is to use (for example)
  // 16-bit counters so that the pair can be read atomically even on 32-bit
  // platforms. Notice that the second assert in acknowledge_yield_request()
  // below does check a form of the above invariant, albeit indirectly.
  // (An illustrative usage sketch follows should_yield() below.)

  static void increment_pending_yields()   {
    Atomic::inc(&_pending_yields);
    assert(_pending_yields >= 0, "can't be negative");
  }
  static void decrement_pending_yields()   {
    Atomic::dec(&_pending_yields);
    assert(_pending_yields >= 0, "can't be negative");
  }
  static void asynchronous_yield_request() {
    assert(CMSIncrementalMode, "Currently only used w/iCMS");
    increment_pending_yields();
    Atomic::inc(&_pending_decrements);
    assert(_pending_decrements >= 0, "can't be negative");
  }
  static void acknowledge_yield_request() {
    jint decrement = _pending_decrements;
    if (decrement > 0) {
      assert(CMSIncrementalMode, "Currently only used w/iCMS");
      // Order important to preserve: _pending_yields >= _pending_decrements
      Atomic::add(-decrement, &_pending_decrements);
      Atomic::add(-decrement, &_pending_yields);
      assert(_pending_decrements >= 0, "can't be negative");
      assert(_pending_yields >= 0, "can't be negative");
    }
  }
  static bool should_yield()   { return _pending_yields > 0; }
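
  // Illustrative sketch (comment only, not compiled) of how the two kinds of
  // yield requests above are expected to be used. The function name and the
  // exact call sites are hypothetical; only the increment/decrement,
  // asynchronous_yield_request(), should_yield() and
  // acknowledge_yield_request() calls are part of this class.
  //
  //   // Synchronous requester (e.g. a young gen collection or direct allocation):
  //   void hypothetical_young_gen_op() {
  //     ConcurrentMarkSweepThread::increment_pending_yields();
  //     // ... perform the collection or allocation work ...
  //     ConcurrentMarkSweepThread::decrement_pending_yields();
  //   }
  //
  //   // Asynchronous requester (iCMS stop request): fire and forget.
  //   ConcurrentMarkSweepThread::asynchronous_yield_request();
  //
  //   // CMS thread, at a potential yield point in its work loop:
  //   if (ConcurrentMarkSweepThread::should_yield()) {
  //     // ... actually yield (e.g. give up the CMS token for a while) ...
  //     ConcurrentMarkSweepThread::acknowledge_yield_request();
  //   }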

  // CMS incremental mode.
  static void start_icms(); // notify thread to start a quantum of work
  static void stop_icms();  // request thread to stop working
  void icms_wait();         // if asked to stop, wait until notified to start

  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
  // must also be enabled/disabled dynamically to allow foreground collections.
  static inline void enable_icms()              { _icms_enabled = true; }
  static inline void disable_icms()             { _icms_enabled = false; }
  static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
  static inline bool icms_enabled()             { return _icms_enabled; }
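
  // Illustrative sketch (comment only, not compiled) of the iCMS quantum
  // protocol implied by the comments above, assuming the usual division of
  // roles: some driver brackets a work quantum with start_icms()/stop_icms(),
  // while the CMS thread parks in icms_wait() until the next quantum begins.
  // The placement shown is hypothetical.
  //
  //   // Driver side:
  //   ConcurrentMarkSweepThread::start_icms();   // let the CMS thread work
  //   // ... duty-cycle interval elapses ...
  //   ConcurrentMarkSweepThread::stop_icms();    // ask it to pause again
  //
  //   // CMS thread side, inside its work loop:
  //   cmst()->icms_wait();   // blocks here if a stop has been requested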
};

inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
  if (CMSTraceThreadState) {
    char buf[128];
    TimeStamp& ts = gclog_or_tty->time_stamp();
    if (!ts.is_updated()) {
      ts.update();
    }
    jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
                 ts.seconds(), desc);
    buf[sizeof(buf) - 1] = '\0';
    // Print via "%s" so that a '%' in desc is not treated as a format directive.
    gclog_or_tty->print("%s", buf);
  }
}

// For scoped increment/decrement of (synchronous) yield requests
class CMSSynchronousYieldRequest: public StackObj {
 public:
  CMSSynchronousYieldRequest() {
    ConcurrentMarkSweepThread::increment_pending_yields();
  }
  ~CMSSynchronousYieldRequest() {
    ConcurrentMarkSweepThread::decrement_pending_yields();
  }
};
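
// Illustrative usage sketch (comment only, not compiled): a requesting thread
// brackets its operation with a scoped request, so the CMS thread yields for
// the operation's duration. The function name below is hypothetical.
//
//   void hypothetical_direct_allocation() {
//     CMSSynchronousYieldRequest yr;  // constructor increments _pending_yields
//     // ... perform the allocation work ...
//   }                                 // destructor decrements _pending_yields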

// Used to emit a warning in case of unexpectedly excessive
// looping (in "apparently endless loops") in CMS code.
class CMSLoopCountWarn: public StackObj {
 private:
  const char* _src;
  const char* _msg;
  const intx  _threshold;
  intx        _ticks;

 public:
  inline CMSLoopCountWarn(const char* src, const char* msg,
                          const intx threshold) :
    _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }

  inline void tick() {
    _ticks++;
    if (CMSLoopWarn && _ticks % _threshold == 0) {
      // _ticks is an intx, so use INTX_FORMAT rather than %d.
      warning("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
    }
  }
};
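
// Illustrative usage sketch (comment only, not compiled): guard an
// "apparently endless" retry loop and, when CMSLoopWarn is set, emit a
// warning every _threshold iterations. The source string, message and
// predicate below are hypothetical.
//
//   CMSLoopCountWarn loop("hypothetical_caller", "waiting for condition", 100);
//   while (!hypothetical_condition()) {
//     loop.tick();
//     // ... retry or back off ...
//   }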

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP