src/share/vm/services/memTracker.hpp

   1 /*
   2  * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  48       NMT_normal,            // normal shutdown, process exit
  49       NMT_out_of_memory,     // shutdown due to out of memory
  50       NMT_initialization,    // shutdown due to initialization failure
  51       NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
  52       NMT_error_reporting,   // shutdown by VMError::report_and_die()
  53       NMT_out_of_generation, // running out of generation queue
  54       NMT_sequence_overflow  // overflow the sequence number
  55    };
  56 
  57 
  58   public:
  59    static inline void init_tracking_options(const char* option_line) { }
  60    static inline bool is_on()   { return false; }
  61    static const char* reason()  { return "Native memory tracking is not implemented"; }
  62    static inline bool can_walk_stack() { return false; }
  63 
  64    static inline void bootstrap_single_thread() { }
  65    static inline void bootstrap_multi_thread() { }
  66    static inline void start() { }
  67 
  68    static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
  69         address pc = 0, Thread* thread = NULL) { }
  70    static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
  71    static inline void record_realloc(address old_addr, address new_addr, size_t size,
  72         MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
  73    static inline void record_arena_size(address addr, size_t size) { }
  74    static inline void record_virtual_memory_reserve(address addr, size_t size,
  75         address pc = 0, Thread* thread = NULL) { }
  76    static inline void record_virtual_memory_commit(address addr, size_t size,
  77         address pc = 0, Thread* thread = NULL) { }
  78    static inline void record_virtual_memory_uncommit(address addr, size_t size,
  79         Thread* thread = NULL) { }
  80    static inline void record_virtual_memory_release(address addr, size_t size,
  81         Thread* thread = NULL) { }
  82    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
  83         Thread* thread = NULL) { }
  84    static inline bool baseline() { return false; }
  85    static inline bool has_baseline() { return false; }
  86 
  87    static inline void set_autoShutdown(bool value) { }
  88    static void shutdown(ShutdownReason reason) { }
  89    static inline bool shutdown_in_progress() { return false; }
  90    static bool print_memory_usage(BaselineOutputer& out, size_t unit,
  91             bool summary_only = true) { return false; }
  92    static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
  93             bool summary_only = true) { return false; }
  94 
  95    static bool wbtest_wait_for_data_merge() { return false; }
  96 
  97    static inline void sync() { }
  98    static inline void thread_exiting(JavaThread* thread) { }
  99 };
 100 
 101 
 102 #else // !INCLUDE_NMT
 103 
 104 #include "memory/allocation.hpp"

 105 #include "runtime/globals.hpp"
 106 #include "runtime/mutex.hpp"
 107 #include "runtime/os.hpp"
 108 #include "runtime/thread.hpp"
 109 #include "services/memPtr.hpp"
 110 #include "services/memRecorder.hpp"
 111 #include "services/memSnapshot.hpp"
 112 #include "services/memTrackWorker.hpp"
 113 
 114 extern bool NMT_track_callsite;
 115 
 116 #ifndef MAX_UNSIGNED_LONG
 117 #define MAX_UNSIGNED_LONG    (unsigned long)(-1)
 118 #endif
 119 
 120 #ifdef ASSERT
 121   #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 122 #else
 123   #define DEBUG_CALLER_PC  0
 124 #endif


 126 // The thread closure walks threads to collect per-thread
 127 // memory recorders at NMT sync point
 128 class SyncThreadRecorderClosure : public ThreadClosure {
 129  private:
 130   int _thread_count;
 131 
 132  public:
 133   SyncThreadRecorderClosure() {
 134     _thread_count = 0;
 135   }
 136 
 137   void do_thread(Thread* thread);
 138   int  get_thread_count() const {
 139     return _thread_count;
 140   }
 141 };
 142 
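
For orientation, a minimal sketch of how a ThreadClosure like the one above is typically driven; the use of Threads::threads_do() at the NMT sync point is an assumption for illustration, not code from this change.

// Hedged sketch: collect per-thread recorders at the NMT sync point by
// walking all threads with the closure (Threads::threads_do is the standard
// way to apply a ThreadClosure; its use here is illustrative).
SyncThreadRecorderClosure closure;
Threads::threads_do(&closure);             // calls closure.do_thread() for each thread
int walked = closure.get_thread_count();   // number of threads visited during the walk
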
 143 class BaselineOutputer;
 144 class MemSnapshot;
 145 class MemTrackWorker;

 146 class Thread;
 147 /*
 148  * MemTracker is the 'gate' class to native memory tracking runtime.
 149  */
 150 class MemTracker : AllStatic {
 151   friend class GenerationData;
 152   friend class MemTrackWorker;
 153   friend class MemSnapshot;

 154   friend class SyncThreadRecorderClosure;
 155 
 156   // NMT state
 157   enum NMTStates {
 158     NMT_uninited,                        // not yet initialized
 159     NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
 160     NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
 161     NMT_started,                         // NMT fully started
 162     NMT_shutdown_pending,                // shutdown pending
 163     NMT_final_shutdown,                  // in final phase of shutdown
 164     NMT_shutdown                         // shutdown
 165   };
 166 
 167  public:
 168   // native memory tracking level
 169   enum NMTLevel {
 170     NMT_off,              // native memory tracking is off
 171     NMT_summary,          // don't track callsite
 172     NMT_detail            // track callsite also
 173   };


 256   static void shutdown(ShutdownReason reason);
 257 
 258   // whether a shutdown has been requested
 259   static inline bool shutdown_in_progress() {
 260     return (_state >= NMT_shutdown_pending);
 261   }
 262 
 263   // bootstrap native memory tracking, so it can start to collect raw data
 264   // before the worker thread can start
 265 
 266   // the first phase of bootstrapping, when the VM is still in single-threaded mode
 267   static void bootstrap_single_thread();
 268   // the second phase of bootstrapping, when the VM is about to enter, or is already in, multi-threaded mode
 269   static void bootstrap_multi_thread();
 270 
 271 
 272   // start() has to be called while the VM is still in single-thread mode, but after
 273   // command line option parsing is done.
 274   static void start();
 275 
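
A hedged sketch of the intended call order during VM startup, pieced together from the comments above; the exact call sites inside VM initialization are not shown in this file and are assumed here for illustration only.

// Illustrative startup sequence (call sites and the option value are assumptions):
const char* option_line = "summary";             // hypothetical -XX:NativeMemoryTracking value
MemTracker::init_tracking_options(option_line);  // pick tracking level from the option
MemTracker::bootstrap_single_thread();           // phase 1: VM still single-threaded
MemTracker::bootstrap_multi_thread();            // phase 2: about to go multi-threaded
MemTracker::start();                             // after command line parsing is done
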
 276   // record a 'malloc' call
 277   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
 278                             address pc = 0, Thread* thread = NULL) {
 279     if (is_on() && NMT_CAN_TRACK(flags)) {
 280       assert(size > 0, "Sanity check");
 281       create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
 282     }
 283   }
 284   // record a 'free' call
 285   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
 286     if (is_on() && NMT_CAN_TRACK(flags)) {
 287       create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread);
 288     }
 289   }
 290   // record a 'realloc' call
 291   static inline void record_realloc(address old_addr, address new_addr, size_t size,
 292        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
 293     if (is_on() && NMT_CAN_TRACK(flags)) {
 294       assert(size > 0, "Sanity check");
 295       record_free(old_addr, flags, thread);
 296       record_malloc(new_addr, size, flags, pc, thread);
 297     }
 298   }
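
To make the intended use of the hooks above concrete, here is a minimal, hedged sketch of an allocation wrapper driving them; my_tracked_malloc and my_tracked_free are hypothetical names, not HotSpot functions, and the real call sites live in HotSpot's native allocation layer.

// Hypothetical wrapper, for illustration only:
static void* my_tracked_malloc(size_t size, MEMFLAGS flags, address pc) {
  address addr = (address)::malloc(size);
  if (addr != NULL) {
    MemTracker::record_malloc(addr, size, flags, pc);   // no-op unless NMT is on
  }
  return addr;
}

static void my_tracked_free(void* p, MEMFLAGS flags) {
  MemTracker::record_free((address)p, flags);           // record before releasing the memory
  ::free(p);
}
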
 299 
 300   // record arena memory size
 301   static inline void record_arena_size(address addr, size_t size) {
 302   // we add a positive offset to the arena address, so that the arena memory record
 303   // is sorted after the arena record
 304     if (is_on() && !UseMallocOnly) {
 305       assert(addr != NULL, "Sanity check");
 306       create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
 307         DEBUG_CALLER_PC, NULL);
 308     }
 309   }
 310 
 311   // record a virtual memory 'reserve' call
 312   static inline void record_virtual_memory_reserve(address addr, size_t size,
 313                             address pc = 0, Thread* thread = NULL) {
 314     if (is_on()) {
 315       assert(size > 0, "Sanity check");
 316       create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(),
 317                            size, pc, thread);
 318     }
 319   }
 320 
 321   static inline void record_thread_stack(address addr, size_t size, Thread* thr,
 322                            address pc = 0) {
 323     if (is_on()) {
 324       assert(size > 0 && thr != NULL, "Sanity check");
 325       create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
 326                           size, pc, thr);
 327       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
 328                           size, pc, thr);
 329     }
 330   }
 331 
 332   static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
 333     if (is_on()) {
 334       assert(size > 0 && thr != NULL, "Sanity check");
 335       assert(!thr->is_Java_thread(), "too early");
 336       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
 337                           size, DEBUG_CALLER_PC, thr);
 338       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
 339                           size, DEBUG_CALLER_PC, thr);
 340     }
 341   }
 342 
 343   // record a virtual memory 'commit' call
 344   static inline void record_virtual_memory_commit(address addr, size_t size,
 345                             address pc, Thread* thread = NULL) {
 346     if (is_on()) {
 347       assert(size > 0, "Sanity check");
 348       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
 349                            size, pc, thread);
 350     }
 351   }
 352 
 353   // record a virtual memory 'uncommit' call
 354   static inline void record_virtual_memory_uncommit(address addr, size_t size,
 355                             Thread* thread = NULL) {
 356     if (is_on()) {
 357       assert(size > 0, "Sanity check");
 358       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
 359                            size, DEBUG_CALLER_PC, thread);
 360     }
 361   }
 362 
 363   // record a virtual memory 'release' call
 364   static inline void record_virtual_memory_release(address addr, size_t size,
 365                             Thread* thread = NULL) {
 366     if (is_on()) {
 367       assert(size > 0, "Sanity check");
 368       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
 369                            size, DEBUG_CALLER_PC, thread);
 370     }
 371   }
 372 
 373   // record memory type on virtual memory base address
 374   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
 375                             Thread* thread = NULL) {
 376     if (is_on()) {
 377       assert(base > 0, "wrong base address");
 378       assert((flags & (~mt_masks)) == 0, "memory type only");
 379       create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
 380                            0, DEBUG_CALLER_PC, thread);
 381     }
 382   }
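
A hedged sketch of how a freshly mapped region might be reported through the virtual memory hooks above; report_mapping_to_nmt is a hypothetical helper and mtInternal is used only as an example memory type, not taken from this change.

// Hypothetical helper, for illustration only: report a reserved and committed
// region to NMT and tag its memory type.
static void report_mapping_to_nmt(address base, size_t size) {
  MemTracker::record_virtual_memory_reserve(base, size);             // pc defaults to 0 here
  MemTracker::record_virtual_memory_type(base, mtInternal);          // classify the region
  MemTracker::record_virtual_memory_commit(base, size, (address)0);  // commit uses explicit pc
}
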
 383 
 384 
 385   // create memory baseline of current memory snapshot
 386   static bool baseline();
 387   // is there a memory baseline
 388   static bool has_baseline() {
 389     return _baseline.baselined();
 390   }
 391 
 392   // print memory usage from current snapshot
 393   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
 394            bool summary_only = true);
 395   // compare memory usage between current snapshot and baseline
 396   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
 397            bool summary_only = true);
 398 
 399   // the version for whitebox testing support; it ensures that all memory
 400   // activities before this method call are reflected in the snapshot
 401   // database.
 402   static bool wbtest_wait_for_data_merge();
 403 
 404   // sync is called within a global safepoint to synchronize NMT data


 427   static void final_shutdown();
 428 
 429  protected:
 430   // retrieve the per-thread recorder of the specified thread.
 431   // if the recorder is full, it is enqueued to the overflow
 432   // queue, and a new recorder is acquired from the recorder pool or a
 433   // new instance is created.
 434   // when thread == NULL, the global recorder is used
 435   static MemRecorder* get_thread_recorder(JavaThread* thread);
 436 
 437   // per-thread recorder pool
 438   static void release_thread_recorder(MemRecorder* rec);
 439   static void delete_all_pooled_recorders();
 440 
 441   // pending recorder queue. Recorders are queued to the pending queue
 442   // when they overflow or are collected at the NMT sync point.
 443   static void enqueue_pending_recorder(MemRecorder* rec);
 444   static MemRecorder* get_pending_recorders();
 445   static void delete_all_pending_recorders();
 446 
 447  private:
 448   // retrieve a pooled memory record or create new one if there is not
 449   // one available
 450   static MemRecorder* get_new_or_pooled_instance();
 451   static void create_memory_record(address addr, MEMFLAGS type,
 452                    size_t size, address pc, Thread* thread);
 453   static void create_record_in_recorder(address addr, MEMFLAGS type,
 454                    size_t size, address pc, JavaThread* thread);
 455 
 456   static void set_current_processing_generation(unsigned long generation) {
 457     _worker_thread_idle = false;
 458     _processing_generation = generation;
 459   }
 460 
 461   static void report_worker_idle() {
 462     _worker_thread_idle = true;
 463   }
 464 
 465  private:
 466   // global memory snapshot
 467   static MemSnapshot*     _snapshot;
 468 
 469   // a memory baseline of snapshot
 470   static MemBaseline      _baseline;
 471 
 472   // query lock
 473   static Mutex*           _query_lock;
 474 


 502   static MemTrackWorker*  _worker_thread;
 503 
 504   // how many safepoints we skipped without entering sync point
 505   static int              _sync_point_skip_count;
 506 
 507   // if the tracker is properly initialized
 508   static bool             _is_tracker_ready;
 509   // tracking level (off, summary and detail)
 510   static enum NMTLevel    _tracking_level;
 511 
 512   // current nmt state
 513   static volatile enum NMTStates   _state;
 514   // the reason for shutting down nmt
 515   static enum ShutdownReason       _reason;
 516   // the generation that NMT is processing
 517   static volatile unsigned long    _processing_generation;
 518   // although NMT is still processing the current generation,
 519   // there are no more recorders to process, so set the idle state
 520   static volatile bool             _worker_thread_idle;
 521 
 522   // if NMT should slow down calling thread to allow
 523   // worker thread to catch up
 524   static volatile bool             _slowdown_calling_thread;
 525 };
 526 
 527 #endif // !INCLUDE_NMT
 528 
 529 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
   1 /*
   2  * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  48       NMT_normal,            // normal shutdown, process exit
  49       NMT_out_of_memory,     // shutdown due to out of memory
  50       NMT_initialization,    // shutdown due to initialization failure
  51       NMT_use_malloc_only,   // cannot combine NMT with the UseMallocOnly flag
  52       NMT_error_reporting,   // shutdown by VMError::report_and_die()
  53       NMT_out_of_generation, // running out of generation queue
  54       NMT_sequence_overflow  // overflow the sequence number
  55    };
  56 
  57 
  58   public:
  59    static inline void init_tracking_options(const char* option_line) { }
  60    static inline bool is_on()   { return false; }
  61    static const char* reason()  { return "Native memory tracking is not implemented"; }
  62    static inline bool can_walk_stack() { return false; }
  63 
  64    static inline void bootstrap_single_thread() { }
  65    static inline void bootstrap_multi_thread() { }
  66    static inline void start() { }
  67 
  68    static inline bool baseline() { return false; }
  69    static inline bool has_baseline() { return false; }
  70 
  71    static inline void set_autoShutdown(bool value) { }
  72    static void shutdown(ShutdownReason reason) { }
  73    static inline bool shutdown_in_progress() { return false; }
  74    static bool print_memory_usage(BaselineOutputer& out, size_t unit,
  75             bool summary_only = true) { return false; }
  76    static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
  77             bool summary_only = true) { return false; }
  78 
  79    static bool wbtest_wait_for_data_merge() { return false; }
  80 
  81    static inline void sync() { }
  82    static inline void thread_exiting(JavaThread* thread) { }
  83 };
  84 
  85 class NMTTrackOp : public StackObj {
  86 
  87  public:
  88   enum NMTMemoryOps {
  89     NoOp,
  90     MallocOp,
  91     ReallocOp,
  92     FreeOp,
  93     ReserveOp,
  94     CommitOp,
  95     ReserveAndCommitOp,
   96     StackAllocOp = ReserveAndCommitOp,
  97     TypeOp,
  98     UncommitOp,
  99     ReleaseOp,
 100     ArenaSizeOp,
 101     StackReleaseOp
 102   };
 103 
 104   NMTTrackOp(NMTMemoryOps op, Thread* thr = NULL) { }
 105   ~NMTTrackOp() { }
 106 
 107   void abort_op() { }
 108 
 109   void execute_op(address addr, size_t size = 0, MEMFLAGS flags = 0, address pc = NULL) { }
 110   void execute_op(address old_addr, address new_addr, size_t size,
 111     MEMFLAGS flags, address pc = NULL) { }
 112 };
 113 
 114 #else // !INCLUDE_NMT
 115 
 116 #include "memory/allocation.hpp"
 117 #include "runtime/atomic.hpp"
 118 #include "runtime/globals.hpp"
 119 #include "runtime/mutex.hpp"
 120 #include "runtime/os.hpp"
 121 #include "runtime/thread.hpp"
 122 #include "services/memPtr.hpp"
 123 #include "services/memRecorder.hpp"
 124 #include "services/memSnapshot.hpp"
 125 #include "services/memTrackWorker.hpp"
 126 
 127 extern bool NMT_track_callsite;
 128 
 129 #ifndef MAX_UNSIGNED_LONG
 130 #define MAX_UNSIGNED_LONG    (unsigned long)(-1)
 131 #endif
 132 
 133 #ifdef ASSERT
 134   #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 135 #else
 136   #define DEBUG_CALLER_PC  0
 137 #endif


 139 // The thread closure walks threads to collect per-thread
 140 // memory recorders at NMT sync point
 141 class SyncThreadRecorderClosure : public ThreadClosure {
 142  private:
 143   int _thread_count;
 144 
 145  public:
 146   SyncThreadRecorderClosure() {
 147     _thread_count = 0;
 148   }
 149 
 150   void do_thread(Thread* thread);
 151   int  get_thread_count() const {
 152     return _thread_count;
 153   }
 154 };
 155 
 156 class BaselineOutputer;
 157 class MemSnapshot;
 158 class MemTrackWorker;
 159 class NMTTrackOp;
 160 class Thread;
 161 /*
 162  * MemTracker is the 'gate' class to native memory tracking runtime.
 163  */
 164 class MemTracker : AllStatic {
 165   friend class GenerationData;
 166   friend class MemTrackWorker;
 167   friend class MemSnapshot;
 168   friend class NMTTrackOp;
 169   friend class SyncThreadRecorderClosure;
 170 
 171   // NMT state
 172   enum NMTStates {
 173     NMT_uninited,                        // not yet initialized
 174     NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
 175     NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
 176     NMT_started,                         // NMT fully started
 177     NMT_shutdown_pending,                // shutdown pending
 178     NMT_final_shutdown,                  // in final phase of shutdown
 179     NMT_shutdown                         // shutdown
 180   };
 181 
 182  public:
 183   // native memory tracking level
 184   enum NMTLevel {
 185     NMT_off,              // native memory tracking is off
 186     NMT_summary,          // don't track callsite
 187     NMT_detail            // track callsite also
 188   };


 271   static void shutdown(ShutdownReason reason);
 272 
 273   // whether a shutdown has been requested
 274   static inline bool shutdown_in_progress() {
 275     return (_state >= NMT_shutdown_pending);
 276   }
 277 
 278   // bootstrap native memory tracking, so it can start to collect raw data
 279   // before the worker thread can start
 280 
 281   // the first phase of bootstrapping, when the VM is still in single-threaded mode
 282   static void bootstrap_single_thread();
 283   // the second phase of bootstrapping, when the VM is about to enter, or is already in, multi-threaded mode
 284   static void bootstrap_multi_thread();
 285 
 286 
 287   // start() has to be called while the VM is still in single-thread mode, but after
 288   // command line option parsing is done.
 289   static void start();
 290 
 291   // create memory baseline of current memory snapshot
 292   static bool baseline();
 293   // is there a memory baseline
 294   static bool has_baseline() {
 295     return _baseline.baselined();
 296   }
 297 
 298   // print memory usage from current snapshot
 299   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
 300            bool summary_only = true);
 301   // compare memory usage between current snapshot and baseline
 302   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
 303            bool summary_only = true);
 304 
 305   // the version for whitebox testing support; it ensures that all memory
 306   // activities before this method call are reflected in the snapshot
 307   // database.
 308   static bool wbtest_wait_for_data_merge();
 309 
 310   // sync is called within a global safepoint to synchronize NMT data


 333   static void final_shutdown();
 334 
 335  protected:
 336   // retrieve the per-thread recorder of the specified thread.
 337   // if the recorder is full, it is enqueued to the overflow
 338   // queue, and a new recorder is acquired from the recorder pool or a
 339   // new instance is created.
 340   // when thread == NULL, the global recorder is used
 341   static MemRecorder* get_thread_recorder(JavaThread* thread);
 342 
 343   // per-thread recorder pool
 344   static void release_thread_recorder(MemRecorder* rec);
 345   static void delete_all_pooled_recorders();
 346 
 347   // pending recorder queue. Recorders are queued to the pending queue
 348   // when they overflow or are collected at the NMT sync point.
 349   static void enqueue_pending_recorder(MemRecorder* rec);
 350   static MemRecorder* get_pending_recorders();
 351   static void delete_all_pending_recorders();
 352 
 353   // write a memory tracking record in recorder
 354   static void write_tracking_record(address addr, MEMFLAGS type,
 355                    size_t size, jint seq, address pc, JavaThread* thread);
 356 
 357 
 358   static bool is_single_threaded_bootstrap() {
 359     return _state == NMT_bootstrapping_single_thread;
 360   }
 361 
 362   static void check_NMT_load(Thread* thr) {
 363     assert(thr != NULL, "Sanity check");
 364     if (_slowdown_calling_thread && thr != _worker_thread) {
 365       os::yield_all();
 366     }
 367   }
 368 
 369   static void inc_pending_op_count() { Atomic::inc(&_pending_op_count); }
 370   static void dec_pending_op_count() {
 371     Atomic::dec(&_pending_op_count);
 372     assert(_pending_op_count >= 0, "Sanity check");
 373   }
 374  private:
 375   // retrieve a pooled memory record or create new one if there is not
 376   // one available
 377   static MemRecorder* get_new_or_pooled_instance();




 378 
 379   static void set_current_processing_generation(unsigned long generation) {
 380     _worker_thread_idle = false;
 381     _processing_generation = generation;
 382   }
 383 
 384   static void report_worker_idle() {
 385     _worker_thread_idle = true;
 386   }
 387 
 388  private:
 389   // global memory snapshot
 390   static MemSnapshot*     _snapshot;
 391 
 392   // a memory baseline of snapshot
 393   static MemBaseline      _baseline;
 394 
 395   // query lock
 396   static Mutex*           _query_lock;
 397 


 425   static MemTrackWorker*  _worker_thread;
 426 
 427   // how many safepoints we skipped without entering sync point
 428   static int              _sync_point_skip_count;
 429 
 430   // if the tracker is properly initialized
 431   static bool             _is_tracker_ready;
 432   // tracking level (off, summary and detail)
 433   static enum NMTLevel    _tracking_level;
 434 
 435   // current nmt state
 436   static volatile enum NMTStates   _state;
 437   // the reason for shutting down nmt
 438   static enum ShutdownReason       _reason;
 439   // the generation that NMT is processing
 440   static volatile unsigned long    _processing_generation;
 441   // although NMT is still processing the current generation,
 442   // there are no more recorders to process, so set the idle state
 443   static volatile bool             _worker_thread_idle;
 444 
 445   // pending memory op count.
 446   // Certain memory ops need to pre-reserve a sequence number
 447   // before the memory operation can happen, to avoid a race condition.
 448   // See NMTTrackOp for details
 449   static volatile jint             _pending_op_count;
 450 
 451   // if NMT should slow down calling thread to allow
 452   // worker thread to catch up
 453   static volatile bool             _slowdown_calling_thread;
 454 };
 455 
 456 /*
 457  * This class intercepts memory operations; it has to be
 458  * instantiated before the memory operation takes place.
 459  * It also decides whether it has to pre-reserve a sequence number
 460  * ahead of the memory operation. If so, it reserves a sequence
 461  * number and increments MemTracker::_pending_op_count to prevent
 462  * NMT from reaching the NMT sync point.
 463  *
 464  */
 465 class NMTTrackOp : public StackObj {
 466 
 467  public:
 468   enum NMTMemoryOps {
 469     NoOp,                   // no op
 470     MallocOp,               // malloc
 471     ReallocOp,              // realloc
 472     FreeOp,                 // free
 473     ReserveOp,              // virtual memory reserve
 474     CommitOp,               // virtual memory commit
 475     ReserveAndCommitOp,     // virtual memory reserve and commit
 476     StackAllocOp = ReserveAndCommitOp, // allocate thread stack
 477     TypeOp,                 // assign virtual memory type
 478     UncommitOp,             // virtual memory uncommit
 479     ReleaseOp,              // virtual memory release
 480     ArenaSizeOp,            // set arena size
 481     StackReleaseOp          // release thread stack
 482   };
 483 
 484   NMTTrackOp(NMTMemoryOps op, Thread* thr = NULL);
 485   ~NMTTrackOp() { }
 486 
 487   // abort this tracking op
 488   void abort_op();
 489 
 490   // execute this tracking op
 491   void execute_op(address addr, size_t size = 0, MEMFLAGS flags = 0, address pc = NULL);
 492   void execute_op(address old_addr, address new_addr, size_t size, MEMFLAGS flags, address pc = NULL);
 493 
 494  private:
 495 
 496   bool         _need_thread_critical_lock;
 497   JavaThread*  _java_thread;
 498   NMTMemoryOps _op;
 499   jint         _seq;         // reserved sequence number
 500 };
 501 
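
A hedged usage sketch for the stack-allocated tracking op above; do_tracked_malloc is a hypothetical helper, shown only to illustrate the construct / execute-or-abort pattern implied by the class comment, not an actual HotSpot call site.

// Hypothetical caller, for illustration only:
static void* do_tracked_malloc(size_t size, MEMFLAGS flags) {
  NMTTrackOp op(NMTTrackOp::MallocOp);     // may pre-reserve a sequence number
  address addr = (address)::malloc(size);
  if (addr == NULL) {
    op.abort_op();                         // give back the reserved sequence number
    return NULL;
  }
  op.execute_op(addr, size, flags);        // write the tracking record
  return addr;
}
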
 502 #endif // !INCLUDE_NMT
 503 
 504 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP