/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "utilities/macros.hpp"

#if !INCLUDE_NMT

#include "utilities/ostream.hpp"

class BaselineOutputer : public StackObj {

};

class BaselineTTYOutputer : public BaselineOutputer {
  public:
    BaselineTTYOutputer(outputStream* st) { }
};

class MemTracker : AllStatic {
  public:
   enum ShutdownReason {
      NMT_shutdown_none,     // no shutdown requested
      NMT_shutdown_user,     // user requested shutdown
      NMT_normal,            // normal shutdown, process exit
      NMT_out_of_memory,     // shutdown due to out of memory
      NMT_initialization,    // shutdown due to initialization failure
      NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
      NMT_error_reporting,   // shutdown by vmError::report_and_die()
      NMT_out_of_generation, // running out of generation queue
      NMT_sequence_overflow  // sequence number overflow
   };

  class Tracker {
   public:
    void discard() { }

    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL) { }
  };

  private:
   static Tracker  _tkr;


  public:
   static inline void init_tracking_options(const char* option_line) { }
   static inline bool is_on()   { return false; }
   static const char* reason()  { return "Native memory tracking is not implemented"; }
   static inline bool can_walk_stack() { return false; }

   static inline void bootstrap_single_thread() { }
   static inline void bootstrap_multi_thread() { }
   static inline void start() { }

   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
        address pc = 0, Thread* thread = NULL) { }
   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
   static inline void record_arena_size(address addr, size_t size) { }
   static inline void record_virtual_memory_reserve(address addr, size_t size,
        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_commit(address addr, size_t size,
        address pc = 0, Thread* thread = NULL) { }
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
        Thread* thread = NULL) { }
   static inline Tracker get_realloc_tracker() { return _tkr; }
   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
   static inline bool baseline() { return false; }
   static inline bool has_baseline() { return false; }

   static inline void set_autoShutdown(bool value) { }
   static void shutdown(ShutdownReason reason) { }
   static inline bool shutdown_in_progress() { return false; }
   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true) { return false; }
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true) { return false; }

   static bool wbtest_wait_for_data_merge() { return false; }

   static inline void sync() { }
   static inline void thread_exiting(JavaThread* thread) { }
};


#else // !INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "services/memPtr.hpp"
#include "services/memRecorder.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"

extern bool NMT_track_callsite;

#ifndef MAX_UNSIGNED_LONG
#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
#endif

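// DEBUG_CALLER_PC evaluates to the caller's program counter in debug builds
// when callsite tracking is enabled, and to 0 otherwise; it supplies the 'pc'
// argument for tracking calls such as record_free() below.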
#ifdef ASSERT
  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else
  #define DEBUG_CALLER_PC  0
#endif

// The thread closure walks threads to collect per-thread
// memory recorders at the NMT sync point
class SyncThreadRecorderClosure : public ThreadClosure {
 private:
  int _thread_count;

 public:
  SyncThreadRecorderClosure() {
    _thread_count = 0;
  }

  void do_thread(Thread* thread);
  int  get_thread_count() const {
    return _thread_count;
  }
};
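
// Illustrative use at the NMT sync point (a sketch only; the actual call site
// lives in the MemTracker implementation, and Threads::threads_do() is assumed
// to be the thread-iteration entry point):
//
//   SyncThreadRecorderClosure closure;
//   Threads::threads_do(&closure);
//   int thread_count = closure.get_thread_count();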

class BaselineOutputer;
class MemSnapshot;
class MemTrackWorker;
class Thread;
/*
 * MemTracker is the 'gate' class to the native memory tracking runtime.
 */
class MemTracker : AllStatic {
  friend class GenerationData;
  friend class MemTrackWorker;
  friend class MemSnapshot;
  friend class SyncThreadRecorderClosure;

  // NMT state
  enum NMTStates {
    NMT_uninited,                        // not yet initialized
    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
    NMT_started,                         // NMT fully started
    NMT_shutdown_pending,                // shutdown pending
    NMT_final_shutdown,                  // in final phase of shutdown
    NMT_shutdown                         // shutdown
  };

 public:
  class Tracker : public StackObj {
    friend class MemTracker;
   public:
    enum MemoryOperation {
      NoOp,                   // no op
      Malloc,                 // malloc
      Realloc,                // realloc
      Free,                   // free
      Reserve,                // virtual memory reserve
      Commit,                 // virtual memory commit
      ReserveAndCommit,       // virtual memory reserve and commit
      StackAlloc = ReserveAndCommit, // allocate thread stack
      Type,                   // assign virtual memory type
      Uncommit,               // virtual memory uncommit
      Release,                // virtual memory release
      ArenaSize,              // set arena size
      StackRelease            // release thread stack
    };


   protected:
    Tracker(MemoryOperation op, Thread* thr = NULL);

   public:
    void discard();

    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL);

   private:
    bool            _need_thread_critical_lock;
    JavaThread*     _java_thread;
    MemoryOperation _op;          // memory operation
    jint            _seq;         // reserved sequence number
  };


 public:
  // native memory tracking level
  enum NMTLevel {
    NMT_off,              // native memory tracking is off
    NMT_summary,          // don't track callsites
    NMT_detail            // also track callsites
  };

  enum ShutdownReason {
    NMT_shutdown_none,     // no shutdown requested
    NMT_shutdown_user,     // user requested shutdown
    NMT_normal,            // normal shutdown, process exit
    NMT_out_of_memory,     // shutdown due to out of memory
    NMT_initialization,    // shutdown due to initialization failure
    NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
    NMT_error_reporting,   // shutdown by vmError::report_and_die()
    NMT_out_of_generation, // running out of generation queue
    NMT_sequence_overflow  // sequence number overflow
  };

 public:
  // initialize the NMT tracking level from command line options, called
  // from the VM command line parsing code
  static void init_tracking_options(const char* option_line);
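
  // For example, when VM argument parsing sees -XX:NativeMemoryTracking=summary
  // or -XX:NativeMemoryTracking=detail (the exact flag spelling is assumed here),
  // the option string "summary" or "detail" would be passed to this method.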

  // whether NMT is enabled to record memory activities
  static inline bool is_on() {
    return (_tracking_level >= NMT_summary &&
      _state >= NMT_bootstrapping_single_thread);
  }

  static inline enum NMTLevel tracking_level() {
    return _tracking_level;
  }

  // user-readable reason for shutting down NMT
  static const char* reason() {
    switch(_reason) {
      case NMT_shutdown_none:
        return "Native memory tracking is not enabled";
      case NMT_shutdown_user:
        return "Native memory tracking has been shut down by the user";
      case NMT_normal:
        return "Native memory tracking has been shut down due to process exit";
      case NMT_out_of_memory:
        return "Native memory tracking has been shut down due to running out of native memory";
      case NMT_initialization:
        return "Native memory tracking failed to initialize";
      case NMT_error_reporting:
        return "Native memory tracking has been shut down due to error reporting";
      case NMT_out_of_generation:
        return "Native memory tracking has been shut down due to running out of generation buffers";
      case NMT_sequence_overflow:
        return "Native memory tracking has been shut down due to sequence number overflow";
      case NMT_use_malloc_only:
        return "Native memory tracking is not supported when UseMallocOnly is on";
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }

  // test whether we can walk the native stack
  static bool can_walk_stack() {
    // the native stack is not walkable during bootstrapping on SPARC
#if defined(SPARC)
    return (_state == NMT_started);
#else
    return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
#endif
  }

  // whether native memory tracking records callsites
  static inline bool track_callsite() { return _tracking_level == NMT_detail; }

  // By default, NMT automatically shuts itself down under extreme situations.
  // When the value is set to false, NMT will try its best to stay alive,
  // even if it has to slow down the VM.
  static inline void set_autoShutdown(bool value) {
    AutoShutdownNMT = value;
    if (AutoShutdownNMT && _slowdown_calling_thread) {
      _slowdown_calling_thread = false;
    }
  }

  // Shut down the native memory tracking capability. Native memory tracking
  // can be shut down by the VM when it encounters low-memory scenarios.
  // The memory tracker should shut itself down gracefully and preserve the
  // latest memory statistics for post-mortem diagnosis.
  static void shutdown(ShutdownReason reason);

  // whether a shutdown has been requested
  static inline bool shutdown_in_progress() {
    return (_state >= NMT_shutdown_pending);
  }

  // bootstrap native memory tracking, so it can start to collect raw data
  // before the worker thread can start

  // the first phase of bootstrapping, while the VM is still in single-threaded mode
  static void bootstrap_single_thread();
  // the second phase of bootstrapping, when the VM is about to enter, or is already in, multi-threaded mode
  static void bootstrap_multi_thread();


  // start() has to be called while the VM is still in single-threaded mode, but
  // after command line option parsing is done.
  static void start();
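
  // Typical startup ordering, pieced together from the NMTStates enum and the
  // comments above (a sketch, not a prescription; the real call sites are in
  // the VM initialization code):
  //
  //   MemTracker::init_tracking_options("summary");  // during argument parsing
  //   MemTracker::bootstrap_single_thread();         // early, single-threaded VM init
  //   MemTracker::bootstrap_multi_thread();          // just before multi-threaded mode
  //   MemTracker::start();                           // still single-threaded, after parsing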

  // record a 'malloc' call
  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
                            address pc = 0, Thread* thread = NULL) {
    Tracker tkr(Tracker::Malloc, thread);
    tkr.record(addr, size, flags, pc);
  }
  // record a 'free' call
  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
    Tracker tkr(Tracker::Free, thread);
    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  }

  static inline void record_arena_size(address addr, size_t size) {
    Tracker tkr(Tracker::ArenaSize);
    tkr.record(addr, size);
  }
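
  // Caller-side sketch of how a native allocator wrapper might report to NMT
  // (names such as 'memblock', 'memflags' and 'caller_pc' are placeholders):
  //
  //   void* memblock = ::malloc(size);
  //   if (memblock != NULL) {
  //     MemTracker::record_malloc((address)memblock, size, memflags, caller_pc);
  //   }
  //   ...
  //   MemTracker::record_free((address)memblock, memflags);  // just before ::free(memblock)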

  // record a virtual memory 'reserve' call
  static inline void record_virtual_memory_reserve(address addr, size_t size,
                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
    assert(size > 0, "Sanity check");
    Tracker tkr(Tracker::Reserve, thread);
    tkr.record(addr, size, flags, pc);
  }

  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
                           address pc = 0) {
    Tracker tkr(Tracker::StackAlloc, thr);
    tkr.record(addr, size, mtThreadStack, pc);
  }

  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
    Tracker tkr(Tracker::StackRelease, thr);
    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
  }

  // record a virtual memory 'commit' call
  static inline void record_virtual_memory_commit(address addr, size_t size,
                            address pc, Thread* thread = NULL) {
    Tracker tkr(Tracker::Commit, thread);
    tkr.record(addr, size, mtNone, pc);
  }

  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
    MEMFLAGS flags, address pc, Thread* thread = NULL) {
    Tracker tkr(Tracker::ReserveAndCommit, thread);
    tkr.record(addr, size, flags, pc);
  }

  // record the memory type for a virtual memory region, keyed by its base address
  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
                            Thread* thread = NULL) {
    Tracker tkr(Tracker::Type);
    tkr.record(base, 0, flags);
  }
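
  // Illustrative lifecycle of a tracked virtual memory region (placeholder
  // names; the real call sites are in the os:: virtual memory layer):
  //
  //   MemTracker::record_virtual_memory_reserve(base, size, mtNone, pc);  // after a successful reserve
  //   MemTracker::record_virtual_memory_type(base, mtJavaHeap);           // once the purpose is known
  //   MemTracker::record_virtual_memory_commit(base, commit_size, pc);    // after a successful commit
  //   // uncommit and release go through the trackers declared below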

  // Get memory trackers for memory operations that can result in race conditions.
  // The memory tracker has to be obtained before the realloc, virtual memory uncommit
  // or virtual memory release takes place; call the tracker's record() method if the
  // operation succeeds, or its discard() method to abort the tracking.
  static inline Tracker get_realloc_tracker() {
    return Tracker(Tracker::Realloc);
  }

  static inline Tracker get_virtual_memory_uncommit_tracker() {
    return Tracker(Tracker::Uncommit);
  }

  static inline Tracker get_virtual_memory_release_tracker() {
    return Tracker(Tracker::Release);
  }
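
  // Usage sketch of the two-phase pattern described above (placeholder names;
  // the real caller is the native allocator / virtual memory wrapper):
  //
  //   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  //   void* new_ptr = ::realloc(old_ptr, new_size);
  //   if (new_ptr != NULL) {
  //     tkr.record((address)old_ptr, (address)new_ptr, new_size, memflags);
  //   } else {
  //     tkr.discard();
  //   }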


  // create a memory baseline from the current memory snapshot
  static bool baseline();
  // whether a memory baseline exists
  static bool has_baseline() {
    return _baseline.baselined();
  }

  // print memory usage from the current snapshot
  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
  // compare memory usage between the current snapshot and the baseline
  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
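
  // Illustrative diagnostic-command flow (a sketch; 'outputer' wraps an output
  // stream and the scale constant K is assumed as the reporting unit):
  //
  //   MemTracker::baseline();                                  // capture a baseline first
  //   ...
  //   BaselineTTYOutputer outputer(tty);
  //   if (MemTracker::has_baseline()) {
  //     MemTracker::compare_memory_usage(outputer, K, true);   // summary diff vs. baseline
  //   } else {
  //     MemTracker::print_memory_usage(outputer, K, true);     // summary of current usage
  //   }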

  // the whitebox testing entry point; it ensures that all memory
  // activities issued before this method call are reflected in the
  // snapshot database.
  static bool wbtest_wait_for_data_merge();

  // sync is called within a global safepoint to synchronize NMT data
  static void sync();

  // called when a thread is about to exit
  static void thread_exiting(JavaThread* thread);

  // retrieve the global snapshot
  static MemSnapshot* get_snapshot() {
    if (shutdown_in_progress()) {
      return NULL;
    }
    return _snapshot;
  }

  // print tracker stats
  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)

 private:
  // start the native memory tracking worker thread
  static bool start_worker(MemSnapshot* snapshot);

  // called by the worker thread to complete the shutdown process
  static void final_shutdown();

 protected:
  // Retrieve the per-thread recorder of the specified thread.
  // If the recorder is full, it will be enqueued to the overflow
  // queue, and a new recorder is acquired from the recorder pool or a
  // new instance is created.
  // When thread == NULL, the global recorder is used.
  static MemRecorder* get_thread_recorder(JavaThread* thread);

  // per-thread recorder pool
  static void release_thread_recorder(MemRecorder* rec);
  static void delete_all_pooled_recorders();

  // pending recorder queue. Recorders are queued to the pending queue
  // when they overflow or are collected at the NMT sync point.
  static void enqueue_pending_recorder(MemRecorder* rec);
  static MemRecorder* get_pending_recorders();
  static void delete_all_pending_recorders();

  // write a memory tracking record into the recorder
  static void write_tracking_record(address addr, MEMFLAGS type,
    size_t size, jint seq, address pc, JavaThread* thread);

  static bool is_single_threaded_bootstrap() {
    return _state == NMT_bootstrapping_single_thread;
  }

  static void check_NMT_load(Thread* thr) {
    assert(thr != NULL, "Sanity check");
    if (_slowdown_calling_thread && thr != _worker_thread) {
#ifdef _WINDOWS
      // On Windows, os::NakedYield() does not work as well
      // as os::yield_all()
      os::yield_all();
#else
      // On Solaris, os::yield_all() depends on os::sleep(),
      // which requires the JavaThread to be in the _thread_in_vm state.
      // Transitioning the thread to the _thread_in_vm state can be dangerous
      // if the caller holds a lock, as it may deadlock with Threads_lock.
      // So use NakedYield() instead.
      //
      // On Linux and BSD, the NakedYield() and yield_all() implementations
      // are the same.
      os::NakedYield();
#endif
    }
  }

  static void inc_pending_op_count() {
    Atomic::inc(&_pending_op_count);
  }

  static void dec_pending_op_count() {
    Atomic::dec(&_pending_op_count);
    assert(_pending_op_count >= 0, "Sanity check");
  }


 private:
  // retrieve a pooled memory recorder or create a new one if none
  // is available
  static MemRecorder* get_new_or_pooled_instance();
  static void create_memory_record(address addr, MEMFLAGS type,
                   size_t size, address pc, Thread* thread);
  static void create_record_in_recorder(address addr, MEMFLAGS type,
                   size_t size, address pc, JavaThread* thread);

  static void set_current_processing_generation(unsigned long generation) {
    _worker_thread_idle = false;
    _processing_generation = generation;
  }

  static void report_worker_idle() {
    _worker_thread_idle = true;
  }

 private:
  // global memory snapshot
  static MemSnapshot*     _snapshot;

  // a memory baseline of the snapshot
  static MemBaseline      _baseline;

  // query lock
  static Mutex*           _query_lock;

  // a thread can start to allocate memory before it is attached
  // to a VM 'Thread'; those memory activities are recorded here.
  // ThreadCritical is required to guard this global recorder.
  static MemRecorder* volatile _global_recorder;

  // main thread id
  debug_only(static intx   _main_thread_tid;)

  // pending recorders to be merged
  static MemRecorder* volatile     _merge_pending_queue;

  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)

  // pooled memory recorders
  static MemRecorder* volatile     _pooled_recorders;

  // memory recorder pool management uses the following
  // counters to determine whether a released memory recorder
  // should be pooled

  // latest thread count
  static int               _thread_count;
  // pooled recorder count
  static volatile jint     _pooled_recorder_count;


  // worker thread to merge pending recorders into the snapshot
  static MemTrackWorker*  _worker_thread;

  // how many safepoints we have skipped without entering the sync point
  static int              _sync_point_skip_count;

  // whether the tracker is properly initialized
  static bool             _is_tracker_ready;
  // tracking level (off, summary and detail)
  static enum NMTLevel    _tracking_level;

  // current NMT state
  static volatile enum NMTStates   _state;
  // the reason for shutting down NMT
  static enum ShutdownReason       _reason;
  // the generation that NMT is currently processing
  static volatile unsigned long    _processing_generation;
  // set when NMT is still processing the current generation but has
  // no more recorders to process, i.e. the worker thread is idle
  static volatile bool             _worker_thread_idle;

  // whether NMT should slow down the calling thread to allow
  // the worker thread to catch up
  static volatile bool             _slowdown_calling_thread;

  // pending memory op count.
  // Certain memory ops need to pre-reserve a sequence number before
  // the memory operation can happen, to avoid race conditions.
  // See MemTracker::Tracker for details.
  static volatile jint             _pending_op_count;
};

#endif // !INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP