/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "services/memPtr.hpp"
#include "services/memRecorder.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"

#ifdef SOLARIS
#include "thread_solaris.inline.hpp"
#endif

// Set by NMT initialization; true when tracking level is "detail"
// (callsites are recorded in addition to summary data).
extern bool NMT_track_callsite;

#ifndef MAX_UNSIGNED_LONG
#define MAX_UNSIGNED_LONG (unsigned long)(-1)
#endif

// In debug builds, capture the caller's pc (skipping two frames) when
// callsite tracking is enabled; in product builds no pc is recorded.
#ifdef ASSERT
  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else
  #define DEBUG_CALLER_PC  0
#endif

// The thread closure walks threads to collect per-thread
// memory recorders at NMT sync point
class SyncThreadRecorderClosure : public ThreadClosure {
 private:
  int _thread_count;          // number of threads visited by do_thread()

 public:
  SyncThreadRecorderClosure() {
    _thread_count = 0;
  }

  void do_thread(Thread* thread);

  // Number of threads walked so far at this sync point.
  int get_thread_count() const {
    return _thread_count;
  }
};

class BaselineOutputer;
class MemSnapshot;
class MemTrackWorker;
class Thread;
/*
 * MemTracker is the 'gate' class to native memory tracking runtime.
 */
class MemTracker : AllStatic {
  friend class GenerationData;
  friend class MemTrackWorker;
  friend class MemSnapshot;
  friend class SyncThreadRecorderClosure;

  // NMT state
  enum NMTStates {
    NMT_uninited,                        // not yet initialized
    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
    NMT_started,                         // NMT fully started
    NMT_shutdown_pending,                // shutdown pending
    NMT_final_shutdown,                  // in final phase of shutdown
    NMT_shutdown                         // shutdown
  };

 public:
  // A stack object that represents one pending memory operation.
  // It reserves a sequence number at construction time; the operation
  // is committed via record(), or abandoned via discard().
  class Tracker : public StackObj {
    friend class MemTracker;
   public:
    enum MemoryOperation {
      NoOp,                        // no op
      Malloc,                      // malloc
      Realloc,                     // realloc
      Free,                        // free
      Reserve,                     // virtual memory reserve
      Commit,                      // virtual memory commit
      ReserveAndCommit,            // virtual memory reserve and commit
      StackAlloc = ReserveAndCommit, // allocate thread stack
      Type,                        // assign virtual memory type
      Uncommit,                    // virtual memory uncommit
      Release,                     // virtual memory release
      ArenaSize,                   // set arena size
      StackRelease                 // release thread stack
    };


   protected:
    Tracker(MemoryOperation op, Thread* thr = NULL);

   public:
    // Abort this tracking operation without writing a record.
    void discard();

    // Commit the operation: write a tracking record for [addr, addr + size).
    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
    // Commit a realloc-style operation that moves a block from old_addr
    // to new_addr.
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL);

   private:
    bool              _need_thread_critical_lock;
    JavaThread*       _java_thread;
    MemoryOperation   _op;          // memory operation
    jint              _seq;         // reserved sequence number
  };


 public:
  // native memory tracking level
  enum NMTLevel {
    NMT_off,              // native memory tracking is off
    NMT_summary,          // don't track callsite
    NMT_detail            // track callsite also
  };

  enum ShutdownReason {
    NMT_shutdown_none,     // no shutdown requested
    NMT_shutdown_user,     // user requested shutdown
    NMT_normal,            // normal shutdown, process exit
    NMT_out_of_memory,     // shutdown due to out of memory
    NMT_initialization,    // shutdown due to initialization failure
    NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
    NMT_error_reporting,   // shutdown by vmError::report_and_die()
    NMT_out_of_generation, // running out of generation queue
    NMT_sequence_overflow  // overflow the sequence number
  };

 public:
  // initialize NMT tracking level from command line options, called
  // from VM command line parsing code
  static void init_tracking_options(const char* option_line);

  // if NMT is enabled to record memory activities
  static inline bool is_on() {
    return (_tracking_level >= NMT_summary &&
      _state >= NMT_bootstrapping_single_thread);
  }

  // current tracking level (off, summary or detail)
  static inline enum NMTLevel tracking_level() {
    return _tracking_level;
  }

  // user readable reason for shutting down NMT
  static const char* reason() {
    switch(_reason) {
      case NMT_shutdown_none:
        return "Native memory tracking is not enabled";
      case NMT_shutdown_user:
        return "Native memory tracking has been shutdown by user";
      case NMT_normal:
        return "Native memory tracking has been shutdown due to process exiting";
      case NMT_out_of_memory:
        return "Native memory tracking has been shutdown due to out of native memory";
      case NMT_initialization:
        return "Native memory tracking failed to initialize";
      case NMT_error_reporting:
        return "Native memory tracking has been shutdown due to error reporting";
      case NMT_out_of_generation:
        return "Native memory tracking has been shutdown due to running out of generation buffer";
      case NMT_sequence_overflow:
        return "Native memory tracking has been shutdown due to overflow the sequence number";
      case NMT_use_malloc_only:
        return "Native memory tracking is not supported when UseMallocOnly is on";
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }

  // test if we can walk native stack
  static bool can_walk_stack() {
  // native stack is not walkable during bootstrapping on sparc
#if defined(SPARC)
    return (_state == NMT_started);
#else
    return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
#endif
  }

  // if native memory tracking tracks callsite
  static inline bool track_callsite() { return _tracking_level == NMT_detail; }

  // NMT automatically shuts itself down under extreme situation by default.
  // When the value is set to false, NMT will try its best to stay alive,
  // even if it has to slow down the VM.
  static inline void set_autoShutdown(bool value) {
    AutoShutdownNMT = value;
    // once auto-shutdown is (re-)enabled, there is no need to throttle
    // calling threads any longer
    if (AutoShutdownNMT && _slowdown_calling_thread) {
      _slowdown_calling_thread = false;
    }
  }

  // shutdown native memory tracking capability. Native memory tracking
  // can be shutdown by VM when it encounters low memory scenarios.
  // Memory tracker should gracefully shutdown itself, and preserve the
  // latest memory statistics for post mortem diagnosis.
  static void shutdown(ShutdownReason reason);

  // if there is shutdown requested
  static inline bool shutdown_in_progress() {
    return (_state >= NMT_shutdown_pending);
  }

  // bootstrap native memory tracking, so it can start to collect raw data
  // before worker thread can start

  // the first phase of bootstrapping, when VM still in single-threaded mode
  static void bootstrap_single_thread();
  // the second phase of bootstrapping, VM is about or already in multi-threaded mode
  static void bootstrap_multi_thread();

 private:
  static Tracker  _tkr;

 public:
  // start() has to be called when VM still in single thread mode, but after
  // command line option parsing is done.
  static void start();

  // record a 'malloc' call
  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
                            address pc = 0, Thread* thread = NULL) {
    Tracker tkr(Tracker::Malloc, thread);
    tkr.record(addr, size, flags, pc);
  }
  // record a 'free' call
  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
    Tracker tkr(Tracker::Free, thread);
    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  }

  // record the (new) size of an arena identified by its base address
  static inline void record_arena_size(address addr, size_t size) {
    Tracker tkr(Tracker::ArenaSize);
    tkr.record(addr, size);
  }

  // record a virtual memory 'reserve' call
  static inline void record_virtual_memory_reserve(address addr, size_t size,
                            MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
    if (is_on()) {
      assert(size > 0, "Sanity check");
      Tracker tkr(Tracker::Reserve, thread);
      tkr.record(addr, size, flags, pc);
    }
  }

  // record allocation of a thread stack (tagged mtThreadStack)
  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
                            address pc = 0) {
    if (is_on()) {
      Tracker tkr(Tracker::StackAlloc, thr);
      tkr.record(addr, size, mtThreadStack, pc);
    }
  }

  // record release of a thread stack
  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
    if (is_on()) {
      Tracker tkr(Tracker::StackRelease, thr);
      tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
    }
  }

  // record a virtual memory 'commit' call
  static inline void record_virtual_memory_commit(address addr, size_t size,
                            address pc, Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Commit, thread);
      tkr.record(addr, size, mtNone, pc);
    }
  }

  // record a combined virtual memory 'reserve and commit' call
  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
    MEMFLAGS flags, address pc, Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::ReserveAndCommit, thread);
      tkr.record(addr, size, flags, pc);
    }
  }

  // record a virtual memory 'release' call
  static inline void record_virtual_memory_release(address addr, size_t size,
                            Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Release, thread);
      tkr.record(addr, size);
    }
  }

  // record memory type on virtual memory base address
  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
                            Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Type);
      tkr.record(base, 0, flags);
    }
  }

  // Get memory trackers for memory operations that can result in race conditions.
  // The memory tracker has to be obtained before realloc, virtual memory uncommit
  // and virtual memory release, and call tracker.record() method if operation
  // succeeded, or tracker.discard() to abort the tracking.
  static inline Tracker get_realloc_tracker() {
    return Tracker(Tracker::Realloc);
  }

  static inline Tracker get_virtual_memory_uncommit_tracker() {
    return Tracker(Tracker::Uncommit);
  }

  static inline Tracker get_virtual_memory_release_tracker() {
    return Tracker(Tracker::Release);
  }


  // create memory baseline of current memory snapshot
  static bool baseline();
  // is there a memory baseline
  static bool has_baseline() {
    return _baseline.baselined();
  }

  // print memory usage from current snapshot
  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
  // compare memory usage between current snapshot and baseline
  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);

  // the version for whitebox testing support, it ensures that all memory
  // activities before this method call, are reflected in the snapshot
  // database.
  static bool wbtest_wait_for_data_merge();

  // sync is called within global safepoint to synchronize nmt data
  static void sync();

  // called when a thread is about to exit
  static void thread_exiting(JavaThread* thread);

  // retrieve global snapshot; returns NULL if a shutdown is in progress
  static MemSnapshot* get_snapshot() {
    if (shutdown_in_progress()) {
      return NULL;
    }
    return _snapshot;
  }

  // print tracker stats
  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)

 private:
  // start native memory tracking worker thread
  static bool start_worker(MemSnapshot* snapshot);

  // called by worker thread to complete shutdown process
  static void final_shutdown();

 protected:
  // retrieve per-thread recorder of the specified thread.
  // if the recorder is full, it will be enqueued to overflow
  // queue, a new recorder is acquired from recorder pool or a
  // new instance is created.
  // when thread == NULL, it means global recorder
  static MemRecorder* get_thread_recorder(JavaThread* thread);

  // per-thread recorder pool
  static void release_thread_recorder(MemRecorder* rec);
  static void delete_all_pooled_recorders();

  // pending recorder queue. Recorders are queued to pending queue
  // when they are overflowed or collected at nmt sync point.
  static void enqueue_pending_recorder(MemRecorder* rec);
  static MemRecorder* get_pending_recorders();
  static void delete_all_pending_recorders();

  // write a memory tracking record in recorder
  static void write_tracking_record(address addr, MEMFLAGS type,
    size_t size, jint seq, address pc, JavaThread* thread);

  static bool is_single_threaded_bootstrap() {
    return _state == NMT_bootstrapping_single_thread;
  }

  // If NMT is configured to throttle calling threads, yield so the
  // worker thread can catch up; the worker thread itself is exempt.
  static void check_NMT_load(Thread* thr) {
    assert(thr != NULL, "Sanity check");
    if (_slowdown_calling_thread && thr != _worker_thread) {
      os::yield_all();
    }
  }

  static void inc_pending_op_count() {
    Atomic::inc(&_pending_op_count);
  }

  static void dec_pending_op_count() {
    Atomic::dec(&_pending_op_count);
    assert(_pending_op_count >= 0, "Sanity check");
  }


 private:
  // retrieve a pooled memory record or create new one if there is not
  // one available
  static MemRecorder* get_new_or_pooled_instance();
  static void create_memory_record(address addr, MEMFLAGS type,
                   size_t size, address pc, Thread* thread);
  static void create_record_in_recorder(address addr, MEMFLAGS type,
                   size_t size, address pc, JavaThread* thread);

  // called by the worker thread when it starts processing a new generation
  static void set_current_processing_generation(unsigned long generation) {
    _worker_thread_idle = false;
    _processing_generation = generation;
  }

  // called by the worker thread when it has nothing left to process
  static void report_worker_idle() {
    _worker_thread_idle = true;
  }

 private:
  // global memory snapshot
  static MemSnapshot*     _snapshot;

  // a memory baseline of snapshot
  static MemBaseline      _baseline;

  // query lock
  static Mutex*           _query_lock;

  // a thread can start to allocate memory before it is attached
  // to VM 'Thread', those memory activities are recorded here.
  // ThreadCritical is required to guard this global recorder.
  static MemRecorder* volatile _global_recorder;

  // main thread id
  debug_only(static intx   _main_thread_tid;)

  // pending recorders to be merged
  static MemRecorder* volatile _merge_pending_queue;

  NOT_PRODUCT(static volatile jint _pending_recorder_count;)

  // pooled memory recorders
  static MemRecorder* volatile _pooled_recorders;

  // memory recorder pool management, uses following
  // counter to determine if a released memory recorder
  // should be pooled

  // latest thread count
  static int               _thread_count;
  // pooled recorder count
  static volatile jint     _pooled_recorder_count;


  // worker thread to merge pending recorders into snapshot
  static MemTrackWorker*  _worker_thread;

  // how many safepoints we skipped without entering sync point
  static int              _sync_point_skip_count;

  // if the tracker is properly initialized
  static bool             _is_tracker_ready;
  // tracking level (off, summary and detail)
  static enum NMTLevel    _tracking_level;

  // current nmt state
  static volatile enum NMTStates   _state;
  // the reason for shutting down nmt
  static enum ShutdownReason       _reason;
  // the generation that NMT is processing
  static volatile unsigned long    _processing_generation;
  // although NMT is still processing current generation, but
  // there are no more recorders to process, set idle state
  static volatile bool             _worker_thread_idle;

  // if NMT should slow down calling thread to allow
  // worker thread to catch up
  static volatile bool             _slowdown_calling_thread;

  // pending memory op count.
  // Certain memory ops need to pre-reserve sequence number
  // before memory operation can happen to avoid race condition.
  // See MemTracker::Tracker for detail
  static volatile jint             _pending_op_count;
};

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP