/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
#define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP

#include "classfile/javaClasses.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.hpp"
#include "utilities/top.hpp"

// The following classes are used for operations
// initiated by a Java thread but that must
// take place in the VMThread.

// Expands one row of the VM_OPS_DO table (below) into the corresponding
// VMOp_Type enumerator, e.g. VM_OP_ENUM(Dummy) -> "VMOp_Dummy,".
#define VM_OP_ENUM(type)   VMOp_##type,

// Note: When new VM_XXX comes up, add 'XXX' to the template table.
// X-macro table of every VM operation type.  Each template(XXX) row is
// expanded once per use site: with VM_OP_ENUM it generates the VMOp_XXX
// enumerator; the corresponding VM_Operation::_names[] table is built
// from the same rows in the .cpp file.
#define VM_OPS_DO(template)                       \
  template(Dummy)                                 \
  template(ThreadStop)                            \
  template(ThreadDump)                            \
  template(PrintThreads)                          \
  template(FindDeadlocks)                         \
  template(ForceSafepoint)                        \
  template(ForceAsyncSafepoint)                   \
  template(Deoptimize)                            \
  template(DeoptimizeFrame)                       \
  template(DeoptimizeAll)                         \
  template(ZombieAll)                             \
  template(UnlinkSymbols)                         \
  template(Verify)                                \
  template(PrintJNI)                              \
  template(HeapDumper)                            \
  template(DeoptimizeTheWorld)                    \
  template(CollectForMetadataAllocation)          \
  template(GC_HeapInspection)                     \
  template(GenCollectFull)                        \
  template(GenCollectFullConcurrent)              \
  template(GenCollectForAllocation)               \
  template(ParallelGCFailedAllocation)            \
  template(ParallelGCSystemGC)                    \
  template(CGC_Operation)                         \
  template(CMS_Initial_Mark)                      \
  template(CMS_Final_Remark)                      \
  template(G1CollectFull)                         \
  template(G1CollectForAllocation)                \
  template(G1IncCollectionPause)                  \
  template(DestroyAllocationContext)              \
  template(EnableBiasedLocking)                   \
  template(RevokeBias)                            \
  template(BulkRevokeBias)                        \
  template(PopulateDumpSharedSpace)               \
  template(JNIFunctionTableCopier)                \
  template(RedefineClasses)                       \
  template(UpdateForPopTopFrame)                  \
  template(SetFramePop)                           \
  template(GetOwnedMonitorInfo)                   \
  template(GetObjectMonitorUsage)                 \
  template(GetCurrentContendedMonitor)            \
  template(GetStackTrace)                         \
  template(GetMultipleStackTraces)                \
  template(GetAllStackTraces)                     \
  template(GetThreadListStackTraces)              \
  template(GetFrameCount)                         \
  template(GetFrameLocation)                      \
  template(ChangeBreakpoints)                     \
  template(GetOrSetLocal)                         \
  template(GetCurrentLocation)                    \
  template(EnterInterpOnlyMode)                   \
  template(ChangeSingleStep)                      \
  template(HeapWalkOperation)                     \
  template(HeapIterateOperation)                  \
  template(ReportJavaOutOfMemory)                 \
  template(JFRCheckpoint)                         \
  template(ShenandoahFullGC)                      \
  template(ShenandoahInitMark)                    \
  template(ShenandoahStartEvacuation)             \
  template(ShenandoahVerifyHeapAfterEvacuation)   \
  template(ShenandoahEvacuation)                  \
  template(ShenandoahUpdateRootRefs)              \
  template(ShenandoahUpdateRefs)                  \
  template(Exit)                                  \
  template(LinuxDllLoad)                          \
  template(RotateGCLog)                           \
  template(WhiteBoxOperation)                     \
  template(ClassLoaderStatsOperation)             \
  template(DumpHashtable)                         \
  template(DumpTouchedMethods)                    \
  template(MarkActiveNMethods)                    \
  template(PrintCompileQueue)                     \
  template(PrintCodeList)                         \
  template(PrintCodeCache)                        \
  template(PrintClassHierarchy)

// Abstract base class of all VM operations.  A VM operation is created by
// a requesting thread, queued for the VMThread via VMThread::execute(),
// and evaluated (doit()) by the VMThread — depending on the Mode either
// inside a safepoint or not, and either blocking the requester or not.
class VM_Operation: public CHeapObj<mtInternal> {
 public:
  // How the operation is evaluated relative to the requester and to safepoints.
  enum Mode {
    _safepoint,       // blocking,        safepoint, vm_op C-heap allocated
    _no_safepoint,    // blocking,     no safepoint, vm_op C-Heap allocated
    _concurrent,      // non-blocking, no safepoint, vm_op C-Heap allocated
    _async_safepoint  // non-blocking,    safepoint, vm_op C-Heap allocated
  };

  // One enumerator per table row, plus a terminating sentinel used for
  // bounds checks (see name(int) below).
  enum VMOp_Type {
    VM_OPS_DO(VM_OP_ENUM)
    VMOp_Terminating
  };

 private:
  Thread*         _calling_thread;  // thread that requested this operation
  ThreadPriority  _priority;        // requester's priority, set via set_calling_thread()
  long            _timestamp;       // set via set_timestamp()
  VM_Operation*   _next;            // queue linkage (doubly linked), managed by the VMThread
  VM_Operation*   _prev;

  // The VM operation name array, one entry per VMOp_Type (defined in the .cpp).
  static const char* _names[];

 public:
  // NOTE(review): _priority and _timestamp are deliberately left
  // uninitialized here; they appear to be assigned via set_calling_thread()
  // and set_timestamp() before use — confirm against VMThread::execute().
  VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; }
  virtual ~VM_Operation() {}

  // VM operation support (used by VM thread)
  Thread* calling_thread() const                 { return _calling_thread; }
  ThreadPriority priority()                      { return _priority; }
  void set_calling_thread(Thread* thread, ThreadPriority priority);

  long timestamp() const                         { return _timestamp; }
  void set_timestamp(long timestamp)             { _timestamp = timestamp; }

  // Called by VM thread - does in turn invoke doit(). Do not override this.
  void evaluate();

  // evaluate() is called by the VMThread and in turn calls doit().
  // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread,
  // doit_prologue() is called in that thread before transferring control to
  // the VMThread.
  // If doit_prologue() returns true the VM operation will proceed, and
  // doit_epilogue() will be called by the JavaThread once the VM operation
  // completes. If doit_prologue() returns false the VM operation is cancelled.
  virtual void doit()                            = 0;
  virtual bool doit_prologue()                   { return true; };
  virtual void doit_epilogue()                   {}; // Note: Not called if mode is: _concurrent

  // Type test
  virtual bool is_methodCompiler() const         { return false; }

  // Linking (queue maintenance; used by the VMThread's operation queue)
  VM_Operation *next() const                     { return _next; }
  VM_Operation *prev() const                     { return _prev; }
  void set_next(VM_Operation *next)              { _next = next; }
  void set_prev(VM_Operation *prev)              { _prev = prev; }

  // Configuration. Override these appropriately in subclasses.
  virtual VMOp_Type type() const = 0;
  virtual Mode evaluation_mode() const            { return _safepoint; }
  virtual bool allow_nested_vm_operations() const { return false; }
  virtual bool is_cheap_allocated() const         { return false; }
  virtual void oops_do(OopClosure* f)             { /* do nothing */ };

  // CAUTION: <don't hang yourself with following rope>
  // If you override these methods, make sure that the evaluation
  // of these methods is race-free and non-blocking, since these
  // methods may be evaluated either by the mutators or by the
  // vm thread, either concurrently with mutators or with the mutators
  // stopped. In other words, taking locks is verboten, and if there
  // are any races in evaluating the conditions, they'd better be benign.
  virtual bool evaluate_at_safepoint() const {
    return evaluation_mode() == _safepoint  ||
           evaluation_mode() == _async_safepoint;
  }
  virtual bool evaluate_concurrently() const {
    return evaluation_mode() == _concurrent ||
           evaluation_mode() == _async_safepoint;
  }

  static const char* mode_to_string(Mode mode);

  // Debugging
  virtual void print_on_error(outputStream* st) const;
  const char* name() const { return _names[type()]; }
  static const char* name(int type) {
    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
    return _names[type];
  }
#ifndef PRODUCT
  void print_on(outputStream* st) const { print_on_error(st); }
#endif
};

// Asynchronously throws a Throwable at a target thread (JVM_StopThread).
class VM_ThreadStop: public VM_Operation {
 private:
  oop _thread;        // The Thread that the Throwable is thrown against
  oop _throwable;     // The Throwable thrown at the target Thread
 public:
  // Note: a GC may happen before this operation is evaluated, so the raw
  // oops stored above are reported to the GC via oops_do() below, which
  // keeps them alive and updated.
  VM_ThreadStop(oop thread, oop throwable) {
    _thread    = thread;
    _throwable = throwable;
  }
  VMOp_Type type() const                         { return VMOp_ThreadStop; }
  oop target_thread() const                      { return _thread; }
  oop throwable() const                          { return _throwable;}
  void doit();
  // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated
  bool allow_nested_vm_operations() const        { return true; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }

  // GC support
  void oops_do(OopClosure* f) {
    f->do_oop(&_thread); f->do_oop(&_throwable);
  }
};

// dummy vm op, evaluated just to force a safepoint
class VM_ForceSafepoint: public VM_Operation {
 public:
  VM_ForceSafepoint() {}
  void doit() {}
  VMOp_Type type() const { return VMOp_ForceSafepoint; }
};

// dummy vm op, evaluated just to force a safepoint; non-blocking variant
// (requester does not wait for completion)
class VM_ForceAsyncSafepoint: public VM_Operation {
 public:
  VM_ForceAsyncSafepoint() {}
  void doit() {}
  VMOp_Type type() const                         { return VMOp_ForceAsyncSafepoint; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }
};

// Deoptimization at a safepoint (see vm_operations.cpp for what is deoptimized).
class VM_Deoptimize: public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const                         { return VMOp_Deoptimize; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

// Marks active nmethods at a safepoint (see vm_operations.cpp for details).
class VM_MarkActiveNMethods: public VM_Operation {
 public:
  VM_MarkActiveNMethods() {}
  VMOp_Type type() const                         { return VMOp_MarkActiveNMethods; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

// Deopt helper that can deoptimize frames in threads other than the
// current thread.  Only used through Deoptimization::deoptimize_frame.
276 class VM_DeoptimizeFrame: public VM_Operation { 277 friend class Deoptimization; 278 279 private: 280 JavaThread* _thread; 281 intptr_t* _id; 282 VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id); 283 284 public: 285 VMOp_Type type() const { return VMOp_DeoptimizeFrame; } 286 void doit(); 287 bool allow_nested_vm_operations() const { return true; } 288 }; 289 290 #ifndef PRODUCT 291 class VM_DeoptimizeAll: public VM_Operation { 292 private: 293 KlassHandle _dependee; 294 public: 295 VM_DeoptimizeAll() {} 296 VMOp_Type type() const { return VMOp_DeoptimizeAll; } 297 void doit(); 298 bool allow_nested_vm_operations() const { return true; } 299 }; 300 301 302 class VM_ZombieAll: public VM_Operation { 303 public: 304 VM_ZombieAll() {} 305 VMOp_Type type() const { return VMOp_ZombieAll; } 306 void doit(); 307 bool allow_nested_vm_operations() const { return true; } 308 }; 309 #endif // PRODUCT 310 311 class VM_UnlinkSymbols: public VM_Operation { 312 public: 313 VM_UnlinkSymbols() {} 314 VMOp_Type type() const { return VMOp_UnlinkSymbols; } 315 void doit(); 316 bool allow_nested_vm_operations() const { return true; } 317 }; 318 319 class VM_Verify: public VM_Operation { 320 private: 321 bool _silent; 322 public: 323 VM_Verify(bool silent = VerifySilently) : _silent(silent) {} 324 VMOp_Type type() const { return VMOp_Verify; } 325 void doit(); 326 }; 327 328 329 class VM_PrintThreads: public VM_Operation { 330 private: 331 outputStream* _out; 332 bool _print_concurrent_locks; 333 public: 334 VM_PrintThreads() { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; } 335 VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; } 336 VMOp_Type type() const { return VMOp_PrintThreads; } 337 void doit(); 338 bool doit_prologue(); 339 void doit_epilogue(); 340 }; 341 342 class VM_PrintJNI: public VM_Operation { 343 private: 344 outputStream* _out; 345 public: 346 VM_PrintJNI() { _out = tty; } 
347 VM_PrintJNI(outputStream* out) { _out = out; } 348 VMOp_Type type() const { return VMOp_PrintJNI; } 349 void doit(); 350 }; 351 352 class DeadlockCycle; 353 class VM_FindDeadlocks: public VM_Operation { 354 private: 355 bool _concurrent_locks; 356 DeadlockCycle* _deadlocks; 357 outputStream* _out; 358 359 public: 360 VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {}; 361 VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {}; 362 ~VM_FindDeadlocks(); 363 364 DeadlockCycle* result() { return _deadlocks; }; 365 VMOp_Type type() const { return VMOp_FindDeadlocks; } 366 void doit(); 367 bool doit_prologue(); 368 }; 369 370 class ThreadDumpResult; 371 class ThreadSnapshot; 372 class ThreadConcurrentLocks; 373 374 class VM_ThreadDump : public VM_Operation { 375 private: 376 ThreadDumpResult* _result; 377 int _num_threads; 378 GrowableArray<instanceHandle>* _threads; 379 int _max_depth; 380 bool _with_locked_monitors; 381 bool _with_locked_synchronizers; 382 383 ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl); 384 385 public: 386 VM_ThreadDump(ThreadDumpResult* result, 387 int max_depth, // -1 indicates entire stack 388 bool with_locked_monitors, 389 bool with_locked_synchronizers); 390 391 VM_ThreadDump(ThreadDumpResult* result, 392 GrowableArray<instanceHandle>* threads, 393 int num_threads, // -1 indicates entire stack 394 int max_depth, 395 bool with_locked_monitors, 396 bool with_locked_synchronizers); 397 398 VMOp_Type type() const { return VMOp_ThreadDump; } 399 void doit(); 400 bool doit_prologue(); 401 void doit_epilogue(); 402 }; 403 404 405 class VM_Exit: public VM_Operation { 406 private: 407 int _exit_code; 408 static volatile bool _vm_exited; 409 static Thread * _shutdown_thread; 410 static void wait_if_vm_exited(); 411 public: 412 VM_Exit(int exit_code) { 413 _exit_code = exit_code; 414 } 415 static int 
wait_for_threads_in_native_to_block(); 416 static int set_vm_exited(); 417 static bool vm_exited() { return _vm_exited; } 418 static void block_if_vm_exited() { 419 if (_vm_exited) { 420 wait_if_vm_exited(); 421 } 422 } 423 VMOp_Type type() const { return VMOp_Exit; } 424 void doit(); 425 }; 426 427 428 class VM_RotateGCLog: public VM_Operation { 429 private: 430 outputStream* _out; 431 432 public: 433 VM_RotateGCLog(outputStream* st) : _out(st) {} 434 VMOp_Type type() const { return VMOp_RotateGCLog; } 435 void doit() { gclog_or_tty->rotate_log(true, _out); } 436 }; 437 438 class VM_PrintCompileQueue: public VM_Operation { 439 private: 440 outputStream* _out; 441 442 public: 443 VM_PrintCompileQueue(outputStream* st) : _out(st) {} 444 VMOp_Type type() const { return VMOp_PrintCompileQueue; } 445 Mode evaluation_mode() const { return _no_safepoint; } 446 void doit(); 447 }; 448 449 class VM_PrintCodeList: public VM_Operation { 450 private: 451 outputStream* _out; 452 453 public: 454 VM_PrintCodeList(outputStream* st) : _out(st) {} 455 VMOp_Type type() const { return VMOp_PrintCodeList; } 456 void doit(); 457 }; 458 459 class VM_PrintCodeCache: public VM_Operation { 460 private: 461 outputStream* _out; 462 463 public: 464 VM_PrintCodeCache(outputStream* st) : _out(st) {} 465 VMOp_Type type() const { return VMOp_PrintCodeCache; } 466 void doit(); 467 }; 468 469 #if INCLUDE_SERVICES 470 class VM_PrintClassHierarchy: public VM_Operation { 471 private: 472 outputStream* _out; 473 bool _print_interfaces; 474 bool _print_subclasses; 475 char* _classname; 476 477 public: 478 VM_PrintClassHierarchy(outputStream* st, bool print_interfaces, bool print_subclasses, char* classname) : 479 _out(st), _print_interfaces(print_interfaces), _print_subclasses(print_subclasses), 480 _classname(classname) {} 481 VMOp_Type type() const { return VMOp_PrintClassHierarchy; } 482 void doit(); 483 }; 484 #endif // INCLUDE_SERVICES 485 486 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP