/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "services/memPtr.hpp"
#include "services/memRecorder.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"

#ifdef SOLARIS
#include "thread_solaris.inline.hpp"
#endif

extern bool NMT_track_callsite;

#ifndef MAX_UNSIGNED_LONG
#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
#endif

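// DEBUG_CALLER_PC: in debug builds, when callsites are being tracked, this
// evaluates to the caller's program counter (two frames up the stack via
// os::get_caller_pc(2)); otherwise it evaluates to 0 and no callsite is recorded.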
#ifdef ASSERT
  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else
  #define DEBUG_CALLER_PC  0
#endif

// The thread closure walks threads to collect per-thread
// memory recorders at the NMT sync point
class SyncThreadRecorderClosure : public ThreadClosure {
 private:
  int _thread_count;

 public:
  SyncThreadRecorderClosure() {
    _thread_count = 0;
  }

  void do_thread(Thread* thread);
  int  get_thread_count() const {
    return _thread_count;
  }
};

class BaselineOutputer;
class MemSnapshot;
class MemTrackWorker;
class Thread;
/*
 * MemTracker is the 'gate' class to the native memory tracking runtime.
 */
class MemTracker : AllStatic {
  friend class GenerationData;
  friend class MemTrackWorker;
  friend class MemSnapshot;
  friend class SyncThreadRecorderClosure;

  // NMT state
  enum NMTStates {
    NMT_uninited,                        // not yet initialized
    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single-threaded mode
    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-threaded mode
    NMT_started,                         // NMT fully started
    NMT_shutdown_pending,                // shutdown pending
    NMT_final_shutdown,                  // in final phase of shutdown
    NMT_shutdown                         // shutdown
  };

 public:
  class Tracker : public StackObj {
    friend class MemTracker;
   public:
    enum MemoryOperation {
      NoOp,                   // no op
      Malloc,                 // malloc
      Realloc,                // realloc
      Free,                   // free
      Reserve,                // virtual memory reserve
      Commit,                 // virtual memory commit
      ReserveAndCommit,       // virtual memory reserve and commit
      StackAlloc = ReserveAndCommit, // allocate thread stack
      Type,                   // assign virtual memory type
      Uncommit,               // virtual memory uncommit
      Release,                // virtual memory release
      ArenaSize,              // set arena size
      StackRelease            // release thread stack
    };

   protected:
    Tracker(MemoryOperation op, Thread* thr = NULL);

   public:
    void discard();

    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
    void record(address old_addr, address new_addr, size_t size,
      MEMFLAGS flags, address pc = NULL);

   private:
    bool            _need_thread_critical_lock;
    JavaThread*     _java_thread;
    MemoryOperation _op;          // memory operation
    jint            _seq;         // reserved sequence number
  };
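
  // Note on usage (illustrative, not a new API): a Tracker is a stack object
  // constructed with the memory operation being performed (and optionally the
  // current thread); the caller then either record()s the operation's address,
  // size, flags and callsite, or discard()s it if the operation did not happen.
  // The record_* wrappers below construct short-lived Trackers directly, while
  // racy operations obtain one via the get_*_tracker() helpers further down
  // (see the sketch that follows those helpers).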

 public:
  // native memory tracking level
  enum NMTLevel {
    NMT_off,              // native memory tracking is off
    NMT_summary,          // don't track callsite
    NMT_detail            // track callsite also
  };

  enum ShutdownReason {
    NMT_shutdown_none,     // no shutdown requested
    NMT_shutdown_user,     // user requested shutdown
    NMT_normal,            // normal shutdown, process exit
    NMT_out_of_memory,     // shutdown due to out of memory
    NMT_initialization,    // shutdown due to initialization failure
    NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
    NMT_error_reporting,   // shutdown by vmError::report_and_die()
    NMT_out_of_generation, // running out of generation queue
    NMT_sequence_overflow  // sequence number overflow
  };

 public:
  // initialize NMT tracking level from command line options, called
  // from VM command line parsing code
  static void init_tracking_options(const char* option_line);

  // whether NMT is enabled to record memory activities
  static inline bool is_on() {
    return (_tracking_level >= NMT_summary &&
      _state >= NMT_bootstrapping_single_thread);
  }

  static inline enum NMTLevel tracking_level() {
    return _tracking_level;
  }

  // user-readable reason for shutting down NMT
  static const char* reason() {
    switch(_reason) {
      case NMT_shutdown_none:
        return "Native memory tracking is not enabled";
      case NMT_shutdown_user:
        return "Native memory tracking has been shut down by user";
      case NMT_normal:
        return "Native memory tracking has been shut down due to process exiting";
      case NMT_out_of_memory:
        return "Native memory tracking has been shut down due to out of native memory";
      case NMT_initialization:
        return "Native memory tracking failed to initialize";
      case NMT_error_reporting:
        return "Native memory tracking has been shut down due to error reporting";
      case NMT_out_of_generation:
        return "Native memory tracking has been shut down due to running out of generation buffer";
      case NMT_sequence_overflow:
        return "Native memory tracking has been shut down due to sequence number overflow";
      case NMT_use_malloc_only:
        return "Native memory tracking is not supported when UseMallocOnly is on";
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }

  // test if we can walk the native stack
  static bool can_walk_stack() {
    // native stack is not walkable during bootstrapping on SPARC
#if defined(SPARC)
    return (_state == NMT_started);
#else
    return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
#endif
  }

  // whether native memory tracking tracks the callsite
  static inline bool track_callsite() { return _tracking_level == NMT_detail; }

  // NMT automatically shuts itself down under extreme situations by default.
  // When the value is set to false, NMT will try its best to stay alive,
  // even if it has to slow down the VM.
  static inline void set_autoShutdown(bool value) {
    AutoShutdownNMT = value;
    if (AutoShutdownNMT && _slowdown_calling_thread) {
      _slowdown_calling_thread = false;
    }
  }

  // shutdown native memory tracking capability. Native memory tracking
  // can be shut down by the VM when it encounters low memory scenarios.
  // The memory tracker should shut itself down gracefully, and preserve the
  // latest memory statistics for post-mortem diagnosis.
  static void shutdown(ShutdownReason reason);

  // whether a shutdown has been requested
  static inline bool shutdown_in_progress() {
    return (_state >= NMT_shutdown_pending);
  }

  // bootstrap native memory tracking, so it can start to collect raw data
  // before the worker thread can start

  // the first phase of bootstrapping, when the VM is still in single-threaded mode
  static void bootstrap_single_thread();
  // the second phase of bootstrapping, when the VM is about to enter, or is
  // already in, multi-threaded mode
  static void bootstrap_multi_thread();

 private:
  static Tracker _tkr;

 public:
  // start() has to be called while the VM is still in single-threaded mode,
  // but after command line option parsing is done.
  static void start();

  // record a 'malloc' call
  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
                            address pc = 0, Thread* thread = NULL) {
    Tracker tkr(Tracker::Malloc, thread);
    tkr.record(addr, size, flags, pc);
  }
  // record a 'free' call
  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
    Tracker tkr(Tracker::Free, thread);
    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  }

  static inline void record_arena_size(address addr, size_t size) {
    Tracker tkr(Tracker::ArenaSize);
    tkr.record(addr, size);
  }
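
  // Illustrative sketch only (the raw ::malloc call and the mtInternal flag
  // below are placeholders, not part of this header): a caller typically
  // records an allocation right after it succeeds.
  //
  //   void* buf = ::malloc(size);
  //   if (buf != NULL) {
  //     MemTracker::record_malloc((address)buf, size, mtInternal, DEBUG_CALLER_PC);
  //   }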

  // record a virtual memory 'reserve' call
  static inline void record_virtual_memory_reserve(address addr, size_t size,
                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
    if (is_on()) {
      assert(size > 0, "Sanity check");
      Tracker tkr(Tracker::Reserve, thread);
      tkr.record(addr, size, flags, pc);
    }
  }

  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
                           address pc = 0) {
    if (is_on()) {
      Tracker tkr(Tracker::StackAlloc, thr);
      tkr.record(addr, size, mtThreadStack, pc);
    }
  }

  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
    if (is_on()) {
      Tracker tkr(Tracker::StackRelease, thr);
      tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
    }
  }

  // record a virtual memory 'commit' call
  static inline void record_virtual_memory_commit(address addr, size_t size,
                            address pc, Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Commit, thread);
      tkr.record(addr, size, mtNone, pc);
    }
  }

  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
    MEMFLAGS flags, address pc, Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::ReserveAndCommit, thread);
      tkr.record(addr, size, flags, pc);
    }
  }

  // record memory type on virtual memory base address
  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
                            Thread* thread = NULL) {
    if (is_on()) {
      Tracker tkr(Tracker::Type);
      tkr.record(base, 0, flags);
    }
  }

  // Get memory trackers for memory operations that can result in race conditions.
  // The tracker has to be obtained before realloc, virtual memory uncommit and
  // virtual memory release; call tracker.record() if the operation succeeded,
  // or tracker.discard() to abort the tracking. An illustrative sketch follows
  // these helpers.
  static inline Tracker get_realloc_tracker() {
    return Tracker(Tracker::Realloc);
  }

  static inline Tracker get_virtual_memory_uncommit_tracker() {
    return Tracker(Tracker::Uncommit);
  }

  static inline Tracker get_virtual_memory_release_tracker() {
    return Tracker(Tracker::Release);
  }
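
  // Illustrative sketch only (the raw ::realloc call and the 'memflags'
  // variable are placeholders, not part of this header): obtain the tracker
  // before the racy operation, then record on success or discard on failure.
  //
  //   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  //   void* new_ptr = ::realloc(old_ptr, new_size);
  //   if (new_ptr != NULL) {
  //     tkr.record((address)old_ptr, (address)new_ptr, new_size, memflags);
  //   } else {
  //     tkr.discard();
  //   }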

  // create a memory baseline of the current memory snapshot
  static bool baseline();
  // is there a memory baseline
  static bool has_baseline() {
    return _baseline.baselined();
  }

  // print memory usage from the current snapshot
  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);
  // compare memory usage between the current snapshot and the baseline
  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
           bool summary_only = true);

  // the version for whitebox testing support; it ensures that all memory
  // activities before this method call are reflected in the snapshot
  // database.
  static bool wbtest_wait_for_data_merge();

  // sync is called within a global safepoint to synchronize NMT data
  static void sync();

  // called when a thread is about to exit
  static void thread_exiting(JavaThread* thread);

  // retrieve the global snapshot
  static MemSnapshot* get_snapshot() {
    if (shutdown_in_progress()) {
      return NULL;
    }
    return _snapshot;
  }

  // print tracker stats
  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)

 private:
  // start the native memory tracking worker thread
  static bool start_worker(MemSnapshot* snapshot);

  // called by the worker thread to complete the shutdown process
  static void final_shutdown();

 protected:
  // retrieve the per-thread recorder of the specified thread.
  // If the recorder is full, it is enqueued to the overflow
  // queue and a new recorder is acquired from the recorder pool,
  // or a new instance is created.
  // When thread == NULL, the global recorder is returned.
  static MemRecorder* get_thread_recorder(JavaThread* thread);

  // per-thread recorder pool
  static void release_thread_recorder(MemRecorder* rec);
  static void delete_all_pooled_recorders();

  // pending recorder queue. Recorders are queued to the pending queue
  // when they overflow or are collected at the NMT sync point.
  static void enqueue_pending_recorder(MemRecorder* rec);
  static MemRecorder* get_pending_recorders();
  static void delete_all_pending_recorders();

  // write a memory tracking record into the recorder
  static void write_tracking_record(address addr, MEMFLAGS type,
    size_t size, jint seq, address pc, JavaThread* thread);

  static bool is_single_threaded_bootstrap() {
    return _state == NMT_bootstrapping_single_thread;
  }

  static void check_NMT_load(Thread* thr) {
    assert(thr != NULL, "Sanity check");
    if (_slowdown_calling_thread && thr != _worker_thread) {
      os::yield_all();
    }
  }

  static void inc_pending_op_count() {
    Atomic::inc(&_pending_op_count);
  }

  static void dec_pending_op_count() {
    Atomic::dec(&_pending_op_count);
    assert(_pending_op_count >= 0, "Sanity check");
  }

 private:
  // retrieve a pooled memory recorder or create a new one if none
  // is available
  static MemRecorder* get_new_or_pooled_instance();
  static void create_memory_record(address addr, MEMFLAGS type,
                   size_t size, address pc, Thread* thread);
  static void create_record_in_recorder(address addr, MEMFLAGS type,
                   size_t size, address pc, JavaThread* thread);

  static void set_current_processing_generation(unsigned long generation) {
    _worker_thread_idle = false;
    _processing_generation = generation;
  }

  static void report_worker_idle() {
    _worker_thread_idle = true;
  }

 private:
  // global memory snapshot
  static MemSnapshot*     _snapshot;

  // a memory baseline of the snapshot
  static MemBaseline      _baseline;

  // query lock
  static Mutex*           _query_lock;

  // a thread can start to allocate memory before it is attached
  // to a VM 'Thread'; those memory activities are recorded here.
  // ThreadCritical is required to guard this global recorder.
  static MemRecorder* volatile _global_recorder;

  // main thread id
  debug_only(static intx   _main_thread_tid;)

  // pending recorders to be merged
  static MemRecorder* volatile     _merge_pending_queue;

  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)

  // pooled memory recorders
  static MemRecorder* volatile     _pooled_recorders;

  // memory recorder pool management uses the following
  // counters to determine if a released memory recorder
  // should be pooled

  // latest thread count
  static int               _thread_count;
  // pooled recorder count
  static volatile jint     _pooled_recorder_count;

  // worker thread to merge pending recorders into the snapshot
  static MemTrackWorker*  _worker_thread;

  // how many safepoints we have skipped without entering a sync point
  static int              _sync_point_skip_count;

  // if the tracker is properly initialized
  static bool             _is_tracker_ready;
  // tracking level (off, summary and detail)
  static enum NMTLevel    _tracking_level;

  // current NMT state
  static volatile enum NMTStates   _state;
  // the reason for shutting down NMT
  static enum ShutdownReason       _reason;
  // the generation that NMT is processing
  static volatile unsigned long    _processing_generation;
  // although NMT is still processing the current generation,
  // there are no more recorders to process, so set the idle state
  static volatile bool             _worker_thread_idle;

  // if NMT should slow down the calling thread to allow the
  // worker thread to catch up
  static volatile bool             _slowdown_calling_thread;

  // pending memory op count.
  // Certain memory ops need to pre-reserve a sequence number
  // before the memory operation can happen, to avoid race conditions.
  // See MemTracker::Tracker for details.
  static volatile jint             _pending_op_count;
};

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP