/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
#define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP

#include "classfile/javaClasses.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.hpp"
#include "code/codeCache.hpp"

// The following classes are used for operations
// initiated by a Java thread but that must
// take place in the VMThread.
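//
// A minimal sketch of the typical pattern (VM_Foo, VMOp_Foo and the call site are
// hypothetical, shown for illustration only): a JavaThread constructs a
// VM_Operation subclass and submits it with VMThread::execute(); the VMThread
// then runs doit(), at a safepoint for the default _safepoint evaluation mode.
//
//   class VM_Foo: public VM_Operation {
//    public:
//     VMOp_Type type() const { return VMOp_Foo; }  // requires a template(Foo) entry in VM_OPS_DO below
//     void doit()            { /* work performed by the VMThread */ }
//   };
//
//   // In a JavaThread:
//   VM_Foo op;
//   VMThread::execute(&op);  // blocks until doit() has completed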

#define VM_OP_ENUM(type)   VMOp_##type,

// Note: When new VM_XXX comes up, add 'XXX' to the template table.
#define VM_OPS_DO(template)                       \
  template(Dummy)                                 \
  template(ThreadStop)                            \
  template(ThreadDump)                            \
  template(PrintThreads)                          \
  template(FindDeadlocks)                         \
  template(ClearICs)                              \
  template(ForceSafepoint)                        \
  template(ForceAsyncSafepoint)                   \
  template(Deoptimize)                            \
  template(DeoptimizeFrame)                       \
  template(DeoptimizeAll)                         \
  template(ZombieAll)                             \
  template(UnlinkSymbols)                         \
  template(Verify)                                \
  template(PrintJNI)                              \
  template(HeapDumper)                            \
  template(DeoptimizeTheWorld)                    \
  template(CollectForMetadataAllocation)          \
  template(GC_HeapInspection)                     \
  template(GenCollectFull)                        \
  template(GenCollectFullConcurrent)              \
  template(GenCollectForAllocation)               \
  template(ParallelGCFailedAllocation)            \
  template(ParallelGCSystemGC)                    \
  template(CGC_Operation)                         \
  template(CMS_Initial_Mark)                      \
  template(CMS_Final_Remark)                      \
  template(G1CollectFull)                         \
  template(G1CollectForAllocation)                \
  template(G1IncCollectionPause)                  \
  template(DestroyAllocationContext)              \
  template(EnableBiasedLocking)                   \
  template(RevokeBias)                            \
  template(BulkRevokeBias)                        \
  template(PopulateDumpSharedSpace)               \
  template(JNIFunctionTableCopier)                \
  template(RedefineClasses)                       \
  template(UpdateForPopTopFrame)                  \
  template(SetFramePop)                           \
  template(GetOwnedMonitorInfo)                   \
  template(GetObjectMonitorUsage)                 \
  template(GetCurrentContendedMonitor)            \
  template(GetStackTrace)                         \
  template(GetMultipleStackTraces)                \
  template(GetAllStackTraces)                     \
  template(GetThreadListStackTraces)              \
  template(GetFrameCount)                         \
  template(GetFrameLocation)                      \
  template(ChangeBreakpoints)                     \
  template(GetOrSetLocal)                         \
  template(GetCurrentLocation)                    \
  template(EnterInterpOnlyMode)                   \
  template(ChangeSingleStep)                      \
  template(HeapWalkOperation)                     \
  template(HeapIterateOperation)                  \
  template(ReportJavaOutOfMemory)                 \
  template(JFRCheckpoint)                         \
  template(Exit)                                  \
  template(LinuxDllLoad)                          \
  template(RotateGCLog)                           \
  template(WhiteBoxOperation)                     \
  template(ClassLoaderStatsOperation)             \
  template(DumpHashtable)                         \
  template(DumpTouchedMethods)                    \
  template(MarkActiveNMethods)                    \
  template(PrintCompileQueue)                     \
  template(PrintClassHierarchy)                   \
  template(ThreadSuspend)                         \
  template(CTWThreshold)                          \
  template(ThreadsSuspendJVMTI)                   \
  template(ICBufferFull)                          \
  template(ScavengeMonitors)                      \

class VM_Operation: public CHeapObj<mtInternal> {
 public:
  enum Mode {
    _safepoint,       // blocking,        safepoint, vm_op C-heap allocated
    _no_safepoint,    // blocking,     no safepoint, vm_op C-heap allocated
    _concurrent,      // non-blocking, no safepoint, vm_op C-heap allocated
    _async_safepoint  // non-blocking,    safepoint, vm_op C-heap allocated
  };

  enum VMOp_Type {
    VM_OPS_DO(VM_OP_ENUM)
    VMOp_Terminating
  };

 private:
  Thread*         _calling_thread;
  ThreadPriority  _priority;
  long            _timestamp;
  VM_Operation*   _next;
  VM_Operation*   _prev;

  // The VM operation name array
  static const char* _names[];

 public:
  VM_Operation()  { _calling_thread = NULL; _next = NULL; _prev = NULL; }
  virtual ~VM_Operation() {}

  // VM operation support (used by VM thread)
  Thread* calling_thread() const                 { return _calling_thread; }
  ThreadPriority priority()                      { return _priority; }
  void set_calling_thread(Thread* thread, ThreadPriority priority);

  long timestamp() const              { return _timestamp; }
  void set_timestamp(long timestamp)  { _timestamp = timestamp; }

  // Called by VM thread - does in turn invoke doit(). Do not override this
  void evaluate();

  // evaluate() is called by the VMThread and in turn calls doit().
  // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread,
  // doit_prologue() is called in that thread before transferring control to
  // the VMThread.
  // If doit_prologue() returns true the VM operation will proceed, and
  // doit_epilogue() will be called by the JavaThread once the VM operation
  // completes. If doit_prologue() returns false the VM operation is cancelled.
  // (See the illustrative sketch below.)
  virtual void doit()                            = 0;
  virtual bool doit_prologue()                   { return true; };
  virtual void doit_epilogue()                   {}; // Note: Not called if mode is: _concurrent
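
  // A hedged sketch of the protocol above (VM_Sample, VMOp_Sample and should_run()
  // are hypothetical, for illustration only):
  //
  //   class VM_Sample: public VM_Operation {
  //    public:
  //     VMOp_Type type() const { return VMOp_Sample; }   // would need its own VM_OPS_DO entry
  //     bool doit_prologue()   { return should_run(); }  // runs in the requesting JavaThread;
  //                                                       // returning false cancels the operation
  //     void doit()            { /* work performed by the VMThread */ }
  //     void doit_epilogue()   { /* post-processing in the requesting JavaThread */ }
  //   };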

  // Type test
  virtual bool is_methodCompiler() const         { return false; }

  // Linking
  VM_Operation *next() const                     { return _next; }
  VM_Operation *prev() const                     { return _prev; }
  void set_next(VM_Operation *next)              { _next = next; }
  void set_prev(VM_Operation *prev)              { _prev = prev; }

  // Configuration. Override these appropriately in subclasses.
  virtual VMOp_Type type() const = 0;
  virtual Mode evaluation_mode() const            { return _safepoint; }
  virtual bool allow_nested_vm_operations() const { return false; }
  virtual bool is_cheap_allocated() const         { return false; }
  virtual void oops_do(OopClosure* f)              { /* do nothing */ };

  // CAUTION: <don't hang yourself with following rope>
  // If you override these methods, make sure that the evaluation
  // of these methods is race-free and non-blocking, since these
  // methods may be evaluated either by the mutators or by the
  // vm thread, either concurrently with mutators or with the mutators
  // stopped. In other words, taking locks is verboten, and if there
  // are any races in evaluating the conditions, they'd better be benign.
  virtual bool evaluate_at_safepoint() const {
    return evaluation_mode() == _safepoint  ||
           evaluation_mode() == _async_safepoint;
  }
  virtual bool evaluate_concurrently() const {
    return evaluation_mode() == _concurrent ||
           evaluation_mode() == _async_safepoint;
  }

  static const char* mode_to_string(Mode mode);

  // Debugging
  virtual void print_on_error(outputStream* st) const;
  const char* name() const { return _names[type()]; }
  static const char* name(int type) {
    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
    return _names[type];
  }
#ifndef PRODUCT
  void print_on(outputStream* st) const { print_on_error(st); }
#endif
};

class VM_ThreadStop: public VM_Operation {
 private:
  oop     _thread;        // The Thread that the Throwable is thrown against
  oop     _throwable;     // The Throwable thrown at the target Thread
 public:
  // All oops are passed as JNI handles, since there is no guarantee that a GC will not happen before the
  // VM operation is executed.
  VM_ThreadStop(oop thread, oop throwable) {
    _thread    = thread;
    _throwable = throwable;
  }
  VMOp_Type type() const                         { return VMOp_ThreadStop; }
  oop target_thread() const                      { return _thread; }
  oop throwable() const                          { return _throwable;}
  void doit();
  // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated
  bool allow_nested_vm_operations() const        { return true; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }

  // GC support
  void oops_do(OopClosure* f) {
    f->do_oop(&_thread); f->do_oop(&_throwable);
  }
};

class VM_ClearICs: public VM_Operation {
 private:
  bool _preserve_static_stubs;
 public:
  VM_ClearICs(bool preserve_static_stubs) { _preserve_static_stubs = preserve_static_stubs; }
  void doit();
  VMOp_Type type() const { return VMOp_ClearICs; }
};

// empty vm op, evaluated just to force a safepoint
class VM_ForceSafepoint: public VM_Operation {
 public:
  void doit()         {}
  VMOp_Type type() const { return VMOp_ForceSafepoint; }
};
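
// A minimal sketch of how an empty operation like this is typically used to bring
// all Java threads to a safepoint (the call site is illustrative, not a specific
// HotSpot caller):
//
//   VM_ForceSafepoint force_safepoint_op;
//   VMThread::execute(&force_safepoint_op);  // blocks until the operation has been evaluated at a safepoint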

// empty vm op, when forcing a safepoint to suspend a thread
class VM_ThreadSuspend: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ThreadSuspend; }
};

// empty vm op, when forcing a safepoint because the CTW threshold has been reached for the sweeper
class VM_CTWThreshold: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_CTWThreshold; }
};

// empty vm op, when forcing a safepoint to suspend threads from jvmti
class VM_ThreadsSuspendJVMTI: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ThreadsSuspendJVMTI; }
};

// empty vm op, when forcing a safepoint due to inline cache buffers being full
class VM_ICBufferFull: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const { return VMOp_ICBufferFull; }
};

// empty asynchronous vm op, when forcing a safepoint to scavenge monitors
class VM_ScavengeMonitors: public VM_ForceSafepoint {
 public:
  VMOp_Type type() const                         { return VMOp_ScavengeMonitors; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }
};

class VM_Deoptimize: public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const                         { return VMOp_Deoptimize; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

class VM_MarkActiveNMethods: public VM_Operation {
 public:
  VM_MarkActiveNMethods() {}
  VMOp_Type type() const                         { return VMOp_MarkActiveNMethods; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

// Deopt helper that can deoptimize frames in threads other than the
// current thread.  Only used through Deoptimization::deoptimize_frame.
class VM_DeoptimizeFrame: public VM_Operation {
  friend class Deoptimization;

 private:
  JavaThread* _thread;
  intptr_t*   _id;
  int _reason;
  VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason);

 public:
  VMOp_Type type() const                         { return VMOp_DeoptimizeFrame; }
  void doit();
  bool allow_nested_vm_operations() const        { return true;  }
};

#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
 private:
  Klass* _dependee;
 public:
  VM_DeoptimizeAll() {}
  VMOp_Type type() const                         { return VMOp_DeoptimizeAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};


class VM_ZombieAll: public VM_Operation {
 public:
  VM_ZombieAll() {}
  VMOp_Type type() const                         { return VMOp_ZombieAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};
#endif // PRODUCT

class VM_UnlinkSymbols: public VM_Operation {
 public:
  VM_UnlinkSymbols() {}
  VMOp_Type type() const                         { return VMOp_UnlinkSymbols; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

class VM_Verify: public VM_Operation {
 public:
  VMOp_Type type() const { return VMOp_Verify; }
  void doit();
};


class VM_PrintThreads: public VM_Operation {
 private:
  outputStream* _out;
  bool _print_concurrent_locks;
 public:
  VM_PrintThreads()                                                { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; }
  VM_PrintThreads(outputStream* out, bool print_concurrent_locks)  { _out = out; _print_concurrent_locks = print_concurrent_locks; }
  VMOp_Type type() const                                           { return VMOp_PrintThreads; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};

class VM_PrintJNI: public VM_Operation {
 private:
  outputStream* _out;
 public:
  VM_PrintJNI()                         { _out = tty; }
  VM_PrintJNI(outputStream* out)        { _out = out; }
  VMOp_Type type() const                { return VMOp_PrintJNI; }
  void doit();
};

class DeadlockCycle;
class VM_FindDeadlocks: public VM_Operation {
 private:
  bool           _concurrent_locks;
  DeadlockCycle* _deadlocks;
  outputStream*  _out;

 public:
  VM_FindDeadlocks(bool concurrent_locks) :  _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL) {};
  VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(NULL), _out(st) {};
  ~VM_FindDeadlocks();

  DeadlockCycle* result()      { return _deadlocks; };
  VMOp_Type type() const       { return VMOp_FindDeadlocks; }
  void doit();
  bool doit_prologue();
};

class ThreadDumpResult;
class ThreadSnapshot;
class ThreadConcurrentLocks;

class VM_ThreadDump : public VM_Operation {
 private:
  ThreadDumpResult*              _result;
  int                            _num_threads;
  GrowableArray<instanceHandle>* _threads;
  int                            _max_depth;
  bool                           _with_locked_monitors;
  bool                           _with_locked_synchronizers;

  ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);

 public:
  VM_ThreadDump(ThreadDumpResult* result,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VM_ThreadDump(ThreadDumpResult* result,
                GrowableArray<instanceHandle>* threads,
                int num_threads,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VMOp_Type type() const { return VMOp_ThreadDump; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};


class VM_Exit: public VM_Operation {
 private:
  int  _exit_code;
  static volatile bool _vm_exited;
  static Thread * _shutdown_thread;
  static void wait_if_vm_exited();
 public:
  VM_Exit(int exit_code) {
    _exit_code = exit_code;
  }
  static int wait_for_threads_in_native_to_block();
  static int set_vm_exited();
  static bool vm_exited()                      { return _vm_exited; }
  static void block_if_vm_exited() {
    if (_vm_exited) {
      wait_if_vm_exited();
    }
  }
  VMOp_Type type() const { return VMOp_Exit; }
  void doit();
};

class VM_PrintCompileQueue: public VM_Operation {
 private:
  outputStream* _out;

 public:
  VM_PrintCompileQueue(outputStream* st) : _out(st) {}
  VMOp_Type type() const { return VMOp_PrintCompileQueue; }
  Mode evaluation_mode() const { return _safepoint; }
  void doit();
};

#if INCLUDE_SERVICES
class VM_PrintClassHierarchy: public VM_Operation {
 private:
  outputStream* _out;
  bool _print_interfaces;
  bool _print_subclasses;
  char* _classname;

 public:
  VM_PrintClassHierarchy(outputStream* st, bool print_interfaces, bool print_subclasses, char* classname) :
    _out(st), _print_interfaces(print_interfaces), _print_subclasses(print_subclasses),
    _classname(classname) {}
  VMOp_Type type() const { return VMOp_PrintClassHierarchy; }
  void doit();
};
#endif // INCLUDE_SERVICES

#endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP