/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP

#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

class ConcurrentMarkSweepGeneration;
class CMSCollector;

// The Concurrent Mark Sweep GC Thread (could be several in the future).
class ConcurrentMarkSweepThread: public ConcurrentGCThread {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
  friend class CMSCollector;
 public:
  virtual void run();

 private:
  static ConcurrentMarkSweepThread*     _cmst;
  static CMSCollector*                  _collector;
  static SurrogateLockerThread*         _slt;
  static SurrogateLockerThread::SLT_msg_type _sltBuffer;
  static Monitor*                       _sltMonitor;

  ConcurrentMarkSweepThread*            _next;

  static bool _should_terminate;

  enum CMS_flag_type {
    CMS_nil             = NoBits,
    CMS_cms_wants_token = nth_bit(0),
    CMS_cms_has_token   = nth_bit(1),
    CMS_vm_wants_token  = nth_bit(2),
    CMS_vm_has_token    = nth_bit(3)
  };

  static int _CMS_flag;

  static bool CMS_flag_is_set(int b)        { return (_CMS_flag & b) != 0;   }
  static bool set_CMS_flag(int b)           { return (_CMS_flag |= b) != 0;  }
  static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
  void sleepBeforeNextCycle();

  // CMS thread should yield for a young gen collection, direct allocation,
  // and iCMS activity.
  static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
  static volatile jint _pending_yields;
  static volatile jint _pending_decrements; // decrements to _pending_yields
  static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing

  // Tracing messages, enabled by CMSTraceThreadState.
  static inline void trace_state(const char* desc);

  static volatile bool _icms_enabled;   // iCMS enabled?
  static volatile bool _should_run;     // iCMS may run
  static volatile bool _should_stop;    // iCMS should stop

  // debugging
  void verify_ok_to_terminate() const PRODUCT_RETURN;

 public:
  // Constructor
  ConcurrentMarkSweepThread(CMSCollector* collector);

  static void makeSurrogateLockerThread(TRAPS);
  static SurrogateLockerThread* slt() { return _slt; }

  // Tester
  bool is_ConcurrentGC_thread() const { return true;       }

  static void threads_do(ThreadClosure* tc);

  // Printing
  void print_on(outputStream* st) const;
  void print() const                                  { print_on(tty); }
  static void print_all_on(outputStream* st);
  static void print_all()                             { print_all_on(tty); }

  // Returns the CMS Thread
  static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
  static CMSCollector*         collector()    { return _collector;  }

  // Create and start the CMS Thread, or stop it on shutdown
  static ConcurrentMarkSweepThread* start(CMSCollector* collector);
  static void stop();
  static bool should_terminate() { return _should_terminate; }

  // Synchronization using CMS token
  static void synchronize(bool is_cms_thread);
  static void desynchronize(bool is_cms_thread);
  static bool vm_thread_has_cms_token() {
    return CMS_flag_is_set(CMS_vm_has_token);
  }
  static bool cms_thread_has_cms_token() {
    return CMS_flag_is_set(CMS_cms_has_token);
  }
  static bool vm_thread_wants_cms_token() {
    return CMS_flag_is_set(CMS_vm_wants_token);
  }
  static bool cms_thread_wants_cms_token() {
    return CMS_flag_is_set(CMS_cms_wants_token);
  }
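
  // Illustrative sketch (not part of this header): a party that must hold
  // the CMS token brackets its critical work with synchronize() and
  // desynchronize(), passing is_cms_thread as appropriate.  The helper
  // do_work_requiring_token() below is a hypothetical placeholder.
  //
  //   void cms_thread_critical_work() {
  //     ConcurrentMarkSweepThread::synchronize(true);    // acquire token as CMS thread
  //     assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "should hold token");
  //     do_work_requiring_token();
  //     ConcurrentMarkSweepThread::desynchronize(true);  // release token
  //   }
  //
  //   void vm_thread_critical_work() {
  //     ConcurrentMarkSweepThread::synchronize(false);   // acquire token as VM thread
  //     assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "should hold token");
  //     do_work_requiring_token();
  //     ConcurrentMarkSweepThread::desynchronize(false); // release token
  //   }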

  // Wait on CMS lock until the next synchronous GC
  // or given timeout, whichever is earlier. A timeout value
  // of 0 indicates that there is no upper bound on the wait time.
  // A concurrent full gc request terminates the wait.
  void wait_on_cms_lock(long t_millis);
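
  // Illustrative sketch (not part of this header; the real definition lives
  // in the .cpp file): the CMS thread's between-cycle loop might use this
  // as follows, with CMSWaitDuration bounding each individual wait
  // (0 == no upper bound).
  //
  //   void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
  //     while (!_should_terminate) {
  //       wait_on_cms_lock(CMSWaitDuration);          // wake on request or timeout
  //       if (_collector->shouldConcurrentCollect()) {
  //         return;                                   // start the next concurrent cycle
  //       }
  //     }
  //   }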

  // The CMS thread will yield during the work portion of its cycle
  // only when requested to.  Both synchronous and asynchronous requests
  // are provided:
  // (1) A synchronous request is used for young gen collections and
  //     for direct allocations.  The requesting thread increments
  //     _pending_yields at the beginning of an operation, and decrements
  //     _pending_yields when that operation is completed.
  //     In turn, the CMS thread yields when _pending_yields is positive,
  //     and continues to yield until the value reverts to 0.
  // (2) An asynchronous request, on the other hand, is used by iCMS
  //     for the stop_icms() operation. A single yield satisfies all of
  //     the outstanding async yield requests, of which there may
  //     occasionally be several in close succession. To accomplish
  //     this, an async-requesting thread atomically increments both
  //     _pending_yields and _pending_decrements. Unlike the synchronous
  //     case described above, an async-requesting thread does not wait
  //     for the operation to complete and then deregister the request.
  //     Instead, after yielding, the CMS thread decrements both
  //     _pending_yields and _pending_decrements by the value seen in
  //     _pending_decrements before the decrement.
  //  NOTE: The above scheme is isomorphic to having two request counters,
  //  one for async requests and one for sync requests, and for the CMS thread
  //  to check the sum of the two counters to decide whether it should yield
  //  and to clear only the async counter when it yields. However, it turns out
  //  to be more efficient for CMS code to check a single counter,
  //  _pending_yields, that holds the sum of both sync and async requests,
  //  together with a second counter, _pending_decrements, that holds only
  //  the async requests, because in a typical CMS run there are many more
  //  potential (i.e. static) yield points than actual (i.e. dynamic) yields
  //  caused by requests, which are few and far between.
  //
  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
  // we cannot easily test that invariant, since the counters are manipulated via
  // atomic instructions without explicit locking and we cannot read
  // the two counters atomically together: one suggestion is to
  // use (for example) 16-bit counters so as to be able to read the
  // two counters atomically even on 32-bit platforms. Notice that
  // the second assert in acknowledge_yield_request() below does indeed
  // check a form of the above invariant, albeit indirectly.
  // (An illustrative usage sketch follows should_yield() below.)

  static void increment_pending_yields()   {
    Atomic::inc(&_pending_yields);
    assert(_pending_yields >= 0, "can't be negative");
  }
  static void decrement_pending_yields()   {
    Atomic::dec(&_pending_yields);
    assert(_pending_yields >= 0, "can't be negative");
  }
  static void asynchronous_yield_request() {
    assert(CMSIncrementalMode, "Currently only used w/iCMS");
    increment_pending_yields();
    Atomic::inc(&_pending_decrements);
    assert(_pending_decrements >= 0, "can't be negative");
  }
  static void acknowledge_yield_request() {
    jint decrement = _pending_decrements;
    if (decrement > 0) {
      assert(CMSIncrementalMode, "Currently only used w/iCMS");
      // Order important to preserve: _pending_yields >= _pending_decrements
      Atomic::add(-decrement, &_pending_decrements);
      Atomic::add(-decrement, &_pending_yields);
      assert(_pending_decrements >= 0, "can't be negative");
      assert(_pending_yields >= 0, "can't be negative");
    }
  }
  static bool should_yield()   { return _pending_yields > 0; }
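
  // Illustrative sketch (not part of this header) of how the two kinds of
  // yield requests interact with the counters above; do_young_gc_work() and
  // the yield body are hypothetical placeholders.
  //
  //   // Synchronous requester (e.g. a young gen collection or direct allocation):
  //   {
  //     CMSSynchronousYieldRequest syr;   // increments _pending_yields (see below)
  //     do_young_gc_work();
  //   }                                   // destructor decrements it again
  //
  //   // Asynchronous requester (iCMS stop_icms()):
  //   ConcurrentMarkSweepThread::asynchronous_yield_request();
  //
  //   // CMS thread, at a potential yield point in its work loop:
  //   if (ConcurrentMarkSweepThread::should_yield()) {
  //     // ... give up locks, pause briefly, then ...
  //     ConcurrentMarkSweepThread::acknowledge_yield_request();
  //   }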

  // CMS incremental mode.
  static void start_icms(); // notify thread to start a quantum of work
  static void stop_icms();  // request thread to stop working
  void icms_wait();         // if asked to stop, wait until notified to start

  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
  // must also be enabled/disabled dynamically to allow foreground collections.
  static inline void enable_icms()              { _icms_enabled = true; }
  static inline void disable_icms()             { _icms_enabled = false; }
  static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
  static inline bool icms_enabled()             { return _icms_enabled; }
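
  // Illustrative sketch (not part of this header): with CMSIncrementalMode,
  // a duty-cycle driver brackets each quantum of concurrent work with
  // start_icms()/stop_icms(), while the CMS thread calls icms_wait() so that
  // it pauses between quanta.  The driver and helper names below are
  // hypothetical placeholders.
  //
  //   // Driver side:
  //   void icms_duty_cycle_driver() {
  //     ConcurrentMarkSweepThread::start_icms();  // let the CMS thread run a quantum
  //     wait_for_end_of_quantum();                // hypothetical
  //     ConcurrentMarkSweepThread::stop_icms();   // ask it to pause again
  //   }
  //
  //   // CMS thread side, inside its concurrent work loop:
  //   void cms_work_step() {
  //     ConcurrentMarkSweepThread::icms_wait();   // blocks here while stopped
  //     do_a_little_concurrent_work();            // hypothetical
  //   }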
};

inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
  if (CMSTraceThreadState) {
    char buf[128];
    TimeStamp& ts = gclog_or_tty->time_stamp();
    if (!ts.is_updated()) {
      ts.update();
    }
    jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
                 ts.seconds(), desc);
    buf[sizeof(buf) - 1] = '\0';
    gclog_or_tty->print("%s", buf);  // print buf as data, not as a format string
  }
}

// For scoped increment/decrement of (synchronous) yield requests
class CMSSynchronousYieldRequest: public StackObj {
 public:
  CMSSynchronousYieldRequest() {
    ConcurrentMarkSweepThread::increment_pending_yields();
  }
  ~CMSSynchronousYieldRequest() {
    ConcurrentMarkSweepThread::decrement_pending_yields();
  }
};
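
// Illustrative sketch (not part of this header): a thread performing a
// young gen collection or a direct allocation keeps the CMS thread yielding
// for the duration of a scope simply by constructing one of these on the
// stack ("do_scavenge" is a hypothetical placeholder):
//
//   {
//     CMSSynchronousYieldRequest syr;  // CMS thread yields while this lives
//     do_scavenge();
//   }                                  // request withdrawn on scope exit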

// Used to emit a warning in case of unexpectedly excessive
// looping (in "apparently endless loops") in CMS code.
class CMSLoopCountWarn: public StackObj {
 private:
  const char* _src;
  const char* _msg;
  const intx  _threshold;
  intx        _ticks;

 public:
  inline CMSLoopCountWarn(const char* src, const char* msg,
                          const intx threshold) :
    _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }

  inline void tick() {
    _ticks++;
    if (CMSLoopWarn && _ticks % _threshold == 0) {
      // Use INTX_FORMAT so the intx tick count prints correctly on LP64.
      warning("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
    }
  }
};
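
// Illustrative sketch (not part of this header): wrapping an "apparently
// endless" retry loop so that it warns every N iterations when CMSLoopWarn
// is set ("try_once" is a hypothetical placeholder):
//
//   CMSLoopCountWarn loop("CMSCollector::example", "waiting for condition", 100);
//   while (!try_once()) {
//     loop.tick();   // emits a warning every 100 iterations
//   }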

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPTHREAD_HPP