/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
#define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP

#include "classfile/javaClasses.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.hpp"
#include "utilities/top.hpp"
#include "code/codeCache.hpp"

// The following classes are used for operations
// initiated by a Java thread but that must
// take place in the VMThread.

// Expands one VM_OPS_DO table entry 'type' into the enum constant 'VMOp_<type>,'
// used to build the VM_Operation::VMOp_Type enumeration below.
#define VM_OP_ENUM(type)   VMOp_##type,

// Note: When new VM_XXX comes up, add 'XXX' to the template table.
42 #define VM_OPS_DO(template) \ 43 template(Dummy) \ 44 template(ThreadStop) \ 45 template(ThreadDump) \ 46 template(PrintThreads) \ 47 template(FindDeadlocks) \ 48 template(ClearICs) \ 49 template(ForceSafepoint) \ 50 template(ForceAsyncSafepoint) \ 51 template(Deoptimize) \ 52 template(DeoptimizeFrame) \ 53 template(DeoptimizeAll) \ 54 template(ZombieAll) \ 55 template(UnlinkSymbols) \ 56 template(Verify) \ 57 template(PrintJNI) \ 58 template(HeapDumper) \ 59 template(DeoptimizeTheWorld) \ 60 template(CollectForMetadataAllocation) \ 61 template(GC_HeapInspection) \ 62 template(GenCollectFull) \ 63 template(GenCollectFullConcurrent) \ 64 template(GenCollectForAllocation) \ 65 template(ParallelGCFailedAllocation) \ 66 template(ParallelGCSystemGC) \ 67 template(CGC_Operation) \ 68 template(CMS_Initial_Mark) \ 69 template(CMS_Final_Remark) \ 70 template(G1CollectFull) \ 71 template(G1CollectForAllocation) \ 72 template(G1IncCollectionPause) \ 73 template(DestroyAllocationContext) \ 74 template(EnableBiasedLocking) \ 75 template(RevokeBias) \ 76 template(BulkRevokeBias) \ 77 template(PopulateDumpSharedSpace) \ 78 template(JNIFunctionTableCopier) \ 79 template(RedefineClasses) \ 80 template(UpdateForPopTopFrame) \ 81 template(SetFramePop) \ 82 template(GetOwnedMonitorInfo) \ 83 template(GetObjectMonitorUsage) \ 84 template(GetCurrentContendedMonitor) \ 85 template(GetStackTrace) \ 86 template(GetMultipleStackTraces) \ 87 template(GetAllStackTraces) \ 88 template(GetThreadListStackTraces) \ 89 template(GetFrameCount) \ 90 template(GetFrameLocation) \ 91 template(ChangeBreakpoints) \ 92 template(GetOrSetLocal) \ 93 template(GetCurrentLocation) \ 94 template(EnterInterpOnlyMode) \ 95 template(ChangeSingleStep) \ 96 template(HeapWalkOperation) \ 97 template(HeapIterateOperation) \ 98 template(ReportJavaOutOfMemory) \ 99 template(JFRCheckpoint) \ 100 template(Exit) \ 101 template(LinuxDllLoad) \ 102 template(RotateGCLog) \ 103 template(WhiteBoxOperation) \ 104 
template(ClassLoaderStatsOperation) \ 105 template(DumpHashtable) \ 106 template(DumpTouchedMethods) \ 107 template(MarkActiveNMethods) \ 108 template(PrintCompileQueue) \ 109 template(PrintCodeList) \ 110 template(PrintCodeCache) \ 111 template(PrintClassHierarchy) \ 112 113 class VM_Operation: public CHeapObj<mtInternal> { 114 public: 115 enum Mode { 116 _safepoint, // blocking, safepoint, vm_op C-heap allocated 117 _no_safepoint, // blocking, no safepoint, vm_op C-Heap allocated 118 _concurrent, // non-blocking, no safepoint, vm_op C-Heap allocated 119 _async_safepoint // non-blocking, safepoint, vm_op C-Heap allocated 120 }; 121 122 enum VMOp_Type { 123 VM_OPS_DO(VM_OP_ENUM) 124 VMOp_Terminating 125 }; 126 127 private: 128 Thread* _calling_thread; 129 ThreadPriority _priority; 130 long _timestamp; 131 VM_Operation* _next; 132 VM_Operation* _prev; 133 134 // The VM operation name array 135 static const char* _names[]; 136 137 public: 138 VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; } 139 virtual ~VM_Operation() {} 140 141 // VM operation support (used by VM thread) 142 Thread* calling_thread() const { return _calling_thread; } 143 ThreadPriority priority() { return _priority; } 144 void set_calling_thread(Thread* thread, ThreadPriority priority); 145 146 long timestamp() const { return _timestamp; } 147 void set_timestamp(long timestamp) { _timestamp = timestamp; } 148 149 // Called by VM thread - does in turn invoke doit(). Do not override this 150 void evaluate(); 151 152 // evaluate() is called by the VMThread and in turn calls doit(). 153 // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, 154 // doit_prologue() is called in that thread before transferring control to 155 // the VMThread. 156 // If doit_prologue() returns true the VM operation will proceed, and 157 // doit_epilogue() will be called by the JavaThread once the VM operation 158 // completes. 
If doit_prologue() returns false the VM operation is cancelled. 159 virtual void doit() = 0; 160 virtual bool doit_prologue() { return true; }; 161 virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent 162 163 // Type test 164 virtual bool is_methodCompiler() const { return false; } 165 166 // Linking 167 VM_Operation *next() const { return _next; } 168 VM_Operation *prev() const { return _prev; } 169 void set_next(VM_Operation *next) { _next = next; } 170 void set_prev(VM_Operation *prev) { _prev = prev; } 171 172 // Configuration. Override these appropriately in subclasses. 173 virtual VMOp_Type type() const = 0; 174 virtual Mode evaluation_mode() const { return _safepoint; } 175 virtual bool allow_nested_vm_operations() const { return false; } 176 virtual bool is_cheap_allocated() const { return false; } 177 virtual void oops_do(OopClosure* f) { /* do nothing */ }; 178 179 // CAUTION: <don't hang yourself with following rope> 180 // If you override these methods, make sure that the evaluation 181 // of these methods is race-free and non-blocking, since these 182 // methods may be evaluated either by the mutators or by the 183 // vm thread, either concurrently with mutators or with the mutators 184 // stopped. In other words, taking locks is verboten, and if there 185 // are any races in evaluating the conditions, they'd better be benign. 
186 virtual bool evaluate_at_safepoint() const { 187 return evaluation_mode() == _safepoint || 188 evaluation_mode() == _async_safepoint; 189 } 190 virtual bool evaluate_concurrently() const { 191 return evaluation_mode() == _concurrent || 192 evaluation_mode() == _async_safepoint; 193 } 194 195 static const char* mode_to_string(Mode mode); 196 197 // Debugging 198 virtual void print_on_error(outputStream* st) const; 199 const char* name() const { return _names[type()]; } 200 static const char* name(int type) { 201 assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); 202 return _names[type]; 203 } 204 #ifndef PRODUCT 205 void print_on(outputStream* st) const { print_on_error(st); } 206 #endif 207 }; 208 209 class VM_ThreadStop: public VM_Operation { 210 private: 211 oop _thread; // The Thread that the Throwable is thrown against 212 oop _throwable; // The Throwable thrown at the target Thread 213 public: 214 // All oops are passed as JNI handles, since there is no guarantee that a GC might happen before the 215 // VM operation is executed. 
216 VM_ThreadStop(oop thread, oop throwable) { 217 _thread = thread; 218 _throwable = throwable; 219 } 220 VMOp_Type type() const { return VMOp_ThreadStop; } 221 oop target_thread() const { return _thread; } 222 oop throwable() const { return _throwable;} 223 void doit(); 224 // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated 225 bool allow_nested_vm_operations() const { return true; } 226 Mode evaluation_mode() const { return _async_safepoint; } 227 bool is_cheap_allocated() const { return true; } 228 229 // GC support 230 void oops_do(OopClosure* f) { 231 f->do_oop(&_thread); f->do_oop(&_throwable); 232 } 233 }; 234 235 class VM_ClearICs: public VM_Operation { 236 public: 237 VM_ClearICs() {} 238 void doit() { CodeCache::clear_inline_caches(); } 239 VMOp_Type type() const { return VMOp_ClearICs; } 240 }; 241 242 // dummy vm op, evaluated just to force a safepoint 243 class VM_ForceSafepoint: public VM_Operation { 244 public: 245 VM_ForceSafepoint() {} 246 void doit() {} 247 VMOp_Type type() const { return VMOp_ForceSafepoint; } 248 }; 249 250 // dummy vm op, evaluated just to force a safepoint 251 class VM_ForceAsyncSafepoint: public VM_Operation { 252 public: 253 VM_ForceAsyncSafepoint() {} 254 void doit() {} 255 VMOp_Type type() const { return VMOp_ForceAsyncSafepoint; } 256 Mode evaluation_mode() const { return _async_safepoint; } 257 bool is_cheap_allocated() const { return true; } 258 }; 259 260 class VM_Deoptimize: public VM_Operation { 261 public: 262 VM_Deoptimize() {} 263 VMOp_Type type() const { return VMOp_Deoptimize; } 264 void doit(); 265 bool allow_nested_vm_operations() const { return true; } 266 }; 267 268 class VM_MarkActiveNMethods: public VM_Operation { 269 public: 270 VM_MarkActiveNMethods() {} 271 VMOp_Type type() const { return VMOp_MarkActiveNMethods; } 272 void doit(); 273 bool allow_nested_vm_operations() const { return true; } 274 }; 275 276 // Deopt helper that can deoptimize frames in 
threads other than the 277 // current thread. Only used through Deoptimization::deoptimize_frame. 278 class VM_DeoptimizeFrame: public VM_Operation { 279 friend class Deoptimization; 280 281 private: 282 JavaThread* _thread; 283 intptr_t* _id; 284 int _reason; 285 VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason); 286 287 public: 288 VMOp_Type type() const { return VMOp_DeoptimizeFrame; } 289 void doit(); 290 bool allow_nested_vm_operations() const { return true; } 291 }; 292 293 #ifndef PRODUCT 294 class VM_DeoptimizeAll: public VM_Operation { 295 private: 296 KlassHandle _dependee; 297 public: 298 VM_DeoptimizeAll() {} 299 VMOp_Type type() const { return VMOp_DeoptimizeAll; } 300 void doit(); 301 bool allow_nested_vm_operations() const { return true; } 302 }; 303 304 305 class VM_ZombieAll: public VM_Operation { 306 public: 307 VM_ZombieAll() {} 308 VMOp_Type type() const { return VMOp_ZombieAll; } 309 void doit(); 310 bool allow_nested_vm_operations() const { return true; } 311 }; 312 #endif // PRODUCT 313 314 class VM_UnlinkSymbols: public VM_Operation { 315 public: 316 VM_UnlinkSymbols() {} 317 VMOp_Type type() const { return VMOp_UnlinkSymbols; } 318 void doit(); 319 bool allow_nested_vm_operations() const { return true; } 320 }; 321 322 class VM_Verify: public VM_Operation { 323 private: 324 bool _silent; 325 public: 326 VM_Verify(bool silent = VerifySilently) : _silent(silent) {} 327 VMOp_Type type() const { return VMOp_Verify; } 328 void doit(); 329 }; 330 331 332 class VM_PrintThreads: public VM_Operation { 333 private: 334 outputStream* _out; 335 bool _print_concurrent_locks; 336 public: 337 VM_PrintThreads() { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; } 338 VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; } 339 VMOp_Type type() const { return VMOp_PrintThreads; } 340 void doit(); 341 bool doit_prologue(); 342 void doit_epilogue(); 343 }; 344 
345 class VM_PrintJNI: public VM_Operation { 346 private: 347 outputStream* _out; 348 public: 349 VM_PrintJNI() { _out = tty; } 350 VM_PrintJNI(outputStream* out) { _out = out; } 351 VMOp_Type type() const { return VMOp_PrintJNI; } 352 void doit(); 353 }; 354 355 class DeadlockCycle; 356 class VM_FindDeadlocks: public VM_Operation { 357 private: 358 bool _concurrent_locks; 359 DeadlockCycle* _deadlocks; 360 outputStream* _out; 361 362 public: 363 VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {}; 364 VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {}; 365 ~VM_FindDeadlocks(); 366 367 DeadlockCycle* result() { return _deadlocks; }; 368 VMOp_Type type() const { return VMOp_FindDeadlocks; } 369 void doit(); 370 bool doit_prologue(); 371 }; 372 373 class ThreadDumpResult; 374 class ThreadSnapshot; 375 class ThreadConcurrentLocks; 376 377 class VM_ThreadDump : public VM_Operation { 378 private: 379 ThreadDumpResult* _result; 380 int _num_threads; 381 GrowableArray<instanceHandle>* _threads; 382 int _max_depth; 383 bool _with_locked_monitors; 384 bool _with_locked_synchronizers; 385 386 ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl); 387 388 public: 389 VM_ThreadDump(ThreadDumpResult* result, 390 int max_depth, // -1 indicates entire stack 391 bool with_locked_monitors, 392 bool with_locked_synchronizers); 393 394 VM_ThreadDump(ThreadDumpResult* result, 395 GrowableArray<instanceHandle>* threads, 396 int num_threads, // -1 indicates entire stack 397 int max_depth, 398 bool with_locked_monitors, 399 bool with_locked_synchronizers); 400 401 VMOp_Type type() const { return VMOp_ThreadDump; } 402 void doit(); 403 bool doit_prologue(); 404 void doit_epilogue(); 405 }; 406 407 408 class VM_Exit: public VM_Operation { 409 private: 410 int _exit_code; 411 static volatile bool _vm_exited; 412 static Thread * _shutdown_thread; 413 static void 
wait_if_vm_exited(); 414 public: 415 VM_Exit(int exit_code) { 416 _exit_code = exit_code; 417 } 418 static int wait_for_threads_in_native_to_block(); 419 static int set_vm_exited(); 420 static bool vm_exited() { return _vm_exited; } 421 static void block_if_vm_exited() { 422 if (_vm_exited) { 423 wait_if_vm_exited(); 424 } 425 } 426 VMOp_Type type() const { return VMOp_Exit; } 427 void doit(); 428 }; 429 430 431 class VM_RotateGCLog: public VM_Operation { 432 private: 433 outputStream* _out; 434 435 public: 436 VM_RotateGCLog(outputStream* st) : _out(st) {} 437 VMOp_Type type() const { return VMOp_RotateGCLog; } 438 void doit() { gclog_or_tty->rotate_log(true, _out); } 439 }; 440 441 class VM_PrintCompileQueue: public VM_Operation { 442 private: 443 outputStream* _out; 444 445 public: 446 VM_PrintCompileQueue(outputStream* st) : _out(st) {} 447 VMOp_Type type() const { return VMOp_PrintCompileQueue; } 448 Mode evaluation_mode() const { return _no_safepoint; } 449 void doit(); 450 }; 451 452 class VM_PrintCodeList: public VM_Operation { 453 private: 454 outputStream* _out; 455 456 public: 457 VM_PrintCodeList(outputStream* st) : _out(st) {} 458 VMOp_Type type() const { return VMOp_PrintCodeList; } 459 void doit(); 460 }; 461 462 class VM_PrintCodeCache: public VM_Operation { 463 private: 464 outputStream* _out; 465 466 public: 467 VM_PrintCodeCache(outputStream* st) : _out(st) {} 468 VMOp_Type type() const { return VMOp_PrintCodeCache; } 469 void doit(); 470 }; 471 472 #if INCLUDE_SERVICES 473 class VM_PrintClassHierarchy: public VM_Operation { 474 private: 475 outputStream* _out; 476 bool _print_interfaces; 477 bool _print_subclasses; 478 char* _classname; 479 480 public: 481 VM_PrintClassHierarchy(outputStream* st, bool print_interfaces, bool print_subclasses, char* classname) : 482 _out(st), _print_interfaces(print_interfaces), _print_subclasses(print_subclasses), 483 _classname(classname) {} 484 VMOp_Type type() const { return VMOp_PrintClassHierarchy; } 485 
void doit(); 486 }; 487 #endif // INCLUDE_SERVICES 488 489 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP