src/share/vm/services/memTracker.cpp

rev 3901 : JDK-8005012: Add WB APIs to better support NMT testing

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "runtime/atomic.hpp"
  27 #include "runtime/interfaceSupport.hpp"
  28 #include "runtime/mutexLocker.hpp"
  29 #include "runtime/safepoint.hpp"
  30 #include "runtime/threadCritical.hpp"

  31 #include "services/memPtr.hpp"
  32 #include "services/memReporter.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/decoder.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 bool NMT_track_callsite = false;
  38 
  39 // walk all 'known' threads at NMT sync point, and collect their recorders
  40 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  41   assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  42   if (thread->is_Java_thread()) {
  43     JavaThread* javaThread = (JavaThread*)thread;
  44     MemRecorder* recorder = javaThread->get_recorder();
  45     if (recorder != NULL) {
  46       MemTracker::enqueue_pending_recorder(recorder);
  47       javaThread->set_recorder(NULL);
  48     }
  49   }
  50   _thread_count ++;
  51 }
  52 
  53 
  54 MemRecorder*                    MemTracker::_global_recorder = NULL;
  55 MemSnapshot*                    MemTracker::_snapshot = NULL;
  56 MemBaseline                     MemTracker::_baseline;
  57 Mutex*                          MemTracker::_query_lock = NULL;
  58 volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
  59 volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
  60 MemTrackWorker*                 MemTracker::_worker_thread = NULL;
  61 int                             MemTracker::_sync_point_skip_count = 0;
  62 MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
  63 volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
  64 MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
  65 int                             MemTracker::_thread_count = 255;
  66 volatile jint                   MemTracker::_pooled_recorder_count = 0;


  67 debug_only(intx                 MemTracker::_main_thread_tid = 0;)
  68 NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
  69 
  70 void MemTracker::init_tracking_options(const char* option_line) {
  71   _tracking_level = NMT_off;
  72   if (strcmp(option_line, "=summary") == 0) {
  73     _tracking_level = NMT_summary;
  74   } else if (strcmp(option_line, "=detail") == 0) {
  75     _tracking_level = NMT_detail;
  76   } else if (strcmp(option_line, "=off") != 0) {
  77     vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  78   }
  79 }
  80 
  81 // first phase of bootstrapping, when VM is still in single-threaded mode.
  82 void MemTracker::bootstrap_single_thread() {
  83   if (_tracking_level > NMT_off) {
  84     assert(_state == NMT_uninited, "wrong state");
  85 
  86     // NMT is not supported when UseMallocOnly is on. NMT can NOT


 158     // we want to know who initiated shutdown
 159     if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
 160                                        (jint*)&_state, (jint)NMT_started)) {
 161         _reason = reason;
 162     }
 163   }
 164 }
 165 
 166 // final phase of shutdown
 167 void MemTracker::final_shutdown() {
 168   // delete all pending recorders and pooled recorders
 169   delete_all_pending_recorders();
 170   delete_all_pooled_recorders();
 171 
 172   {
 173     // shared baseline and snapshot are the only objects needed to
 174     // create query results
 175     MutexLockerEx locker(_query_lock, true);
 176     // clean up baseline data and snapshot
 177     _baseline.clear();
 178     delete _snapshot;
 179     _snapshot = NULL;

 180   }
 181 
 182   // shut down the shared decoder instance, since it is only
 183   // used by native memory tracking so far.
 184   Decoder::shutdown();
 185 
 186   MemTrackWorker* worker = NULL;
 187   {
 188     ThreadCritical tc;
 189     // cannot delete the worker inside the thread critical section
 190     if (_worker_thread != NULL && Thread::current() == _worker_thread) {
 191       worker = _worker_thread;
 192       _worker_thread = NULL;
 193     }
 194   }
 195   if (worker != NULL) {
 196     delete worker;
 197   }
 198   _state = NMT_final_shutdown;
 199 }


 261 MemRecorder* MemTracker::get_new_or_pooled_instance() {
 262    MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
 263    if (cur_head == NULL) {
 264      MemRecorder* rec = new (std::nothrow)MemRecorder();
 265      if (rec == NULL || rec->out_of_memory()) {
 266        shutdown(NMT_out_of_memory);
 267        if (rec != NULL) {
 268          delete rec;
 269          rec = NULL;
 270        }
 271      }
 272      return rec;
 273    } else {
 274      MemRecorder* next_head = cur_head->next();
 275      if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
 276        (void*)cur_head)) {
 277        return get_new_or_pooled_instance();
 278      }
 279      cur_head->set_next(NULL);
 280      Atomic::dec(&_pooled_recorder_count);
 281      debug_only(cur_head->set_generation();)
 282      return cur_head;
 283   }
 284 }
 285 
 286 /*
 287  * retrieve all recorders in the pending queue, and empty the queue
 288  */
 289 MemRecorder* MemTracker::get_pending_recorders() {
 290   MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 291   MemRecorder* null_ptr = NULL;
 292   while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
 293     (void*)cur_head)) {
 294     cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 295   }
 296   NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
 297   return cur_head;
 298 }
 299 
 300 /*
 301  * release a recorder to the recorder pool.


 550 bool MemTracker::baseline() {
 551   MutexLockerEx lock(_query_lock, true);
 552   MemSnapshot* snapshot = get_snapshot();
 553   if (snapshot != NULL) {
 554     return _baseline.baseline(*snapshot, false);
 555   }
 556   return false;
 557 }
 558 
 559 // print memory usage from current snapshot
 560 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 561   MemBaseline  baseline;
 562   MutexLockerEx lock(_query_lock, true);
 563   MemSnapshot* snapshot = get_snapshot();
 564   if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 565     BaselineReporter reporter(out, unit);
 566     reporter.report_baseline(baseline, summary_only);
 567     return true;
 568   }
 569   return false;

 570 }
 571 
 572 // compare memory usage between current snapshot and baseline
 573 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 574   MutexLockerEx lock(_query_lock, true);
 575   if (_baseline.baselined()) {
 576     MemBaseline baseline;
 577     MemSnapshot* snapshot = get_snapshot();
 578     if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 579       BaselineReporter reporter(out, unit);
 580       reporter.diff_baselines(baseline, _baseline, summary_only);
 581       return true;
 582     }
 583   }
 584   return false;
 585 }
 586 
 587 #ifndef PRODUCT
 588 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
 589   int cur_len = 0;




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "runtime/atomic.hpp"
  27 #include "runtime/interfaceSupport.hpp"
  28 #include "runtime/mutexLocker.hpp"
  29 #include "runtime/safepoint.hpp"
  30 #include "runtime/threadCritical.hpp"
  31 #include "runtime/vm_operations.hpp"
  32 #include "services/memPtr.hpp"
  33 #include "services/memReporter.hpp"
  34 #include "services/memTracker.hpp"
  35 #include "utilities/decoder.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 
  38 bool NMT_track_callsite = false;
  39 
  40 // walk all 'known' threads at NMT sync point, and collect their recorders
  41 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  42   assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  43   if (thread->is_Java_thread()) {
  44     JavaThread* javaThread = (JavaThread*)thread;
  45     MemRecorder* recorder = javaThread->get_recorder();
  46     if (recorder != NULL) {
  47       MemTracker::enqueue_pending_recorder(recorder);
  48       javaThread->set_recorder(NULL);
  49     }
  50   }
  51   _thread_count ++;
  52 }
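
The hand-off above is a thread-closure (visitor) walk: at a safepoint the closure visits every known thread and detaches its recorder into the pending queue. A standalone C++ sketch of the same shape, with Thread, Recorder, and CollectRecorders as illustrative stand-ins for the HotSpot types:

    #include <vector>

    struct Recorder { /* per-thread buffer of memory events */ };

    struct Thread {
      Recorder* _recorder = nullptr;
      // detach and return the recorder, leaving the thread without one
      Recorder* take_recorder() {
        Recorder* r = _recorder;
        _recorder = nullptr;
        return r;
      }
    };

    struct ThreadClosure {
      virtual void do_thread(Thread* t) = 0;
      virtual ~ThreadClosure() {}
    };

    // collects every non-NULL recorder, mirroring SyncThreadRecorderClosure
    struct CollectRecorders : public ThreadClosure {
      std::vector<Recorder*> _collected;
      virtual void do_thread(Thread* t) {
        if (Recorder* r = t->take_recorder()) {
          _collected.push_back(r);
        }
      }
    };
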
  53 
  54 
  55 MemRecorder*                    MemTracker::_global_recorder = NULL;
  56 MemSnapshot*                    MemTracker::_snapshot = NULL;
  57 MemBaseline                     MemTracker::_baseline;
  58 Mutex*                          MemTracker::_query_lock = NULL;
  59 volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
  60 volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
  61 MemTrackWorker*                 MemTracker::_worker_thread = NULL;
  62 int                             MemTracker::_sync_point_skip_count = 0;
  63 MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
  64 volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
  65 MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
  66 int                             MemTracker::_thread_count = 255;
  67 volatile jint                   MemTracker::_pooled_recorder_count = 0;
  68 volatile unsigned long          MemTracker::_processing_generation = 0;
  69 volatile bool                   MemTracker::_worker_thread_idle = false;
  70 debug_only(intx                 MemTracker::_main_thread_tid = 0;)
  71 NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
  72 
  73 void MemTracker::init_tracking_options(const char* option_line) {
  74   _tracking_level = NMT_off;
  75   if (strcmp(option_line, "=summary") == 0) {
  76     _tracking_level = NMT_summary;
  77   } else if (strcmp(option_line, "=detail") == 0) {
  78     _tracking_level = NMT_detail;
  79   } else if (strcmp(option_line, "=off") != 0) {
  80     vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  81   }
  82 }
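
Note that the leading '=' is still attached when the argument reaches this function: -XX:NativeMemoryTracking=summary arrives as "=summary". A minimal standalone sketch of the accepted forms (parse_nmt_option is a hypothetical name, not HotSpot code):

    #include <cassert>
    #include <cstring>

    enum NMTLevel { NMT_off, NMT_summary, NMT_detail };

    static NMTLevel parse_nmt_option(const char* option_line) {
      if (strcmp(option_line, "=summary") == 0) return NMT_summary;
      if (strcmp(option_line, "=detail") == 0)  return NMT_detail;
      // the real code exits the VM on anything other than "=off" here
      return NMT_off;
    }

    int main() {
      assert(parse_nmt_option("=summary") == NMT_summary);
      assert(parse_nmt_option("=detail")  == NMT_detail);
      assert(parse_nmt_option("=off")     == NMT_off);
      return 0;
    }
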
  83 
  84 // first phase of bootstrapping, when VM is still in single-threaded mode.
  85 void MemTracker::bootstrap_single_thread() {
  86   if (_tracking_level > NMT_off) {
  87     assert(_state == NMT_uninited, "wrong state");
  88 
  89     // NMT is not supported when UseMallocOnly is on. NMT can NOT


 161     // we want to know who initiated shutdown
 162     if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
 163                                        (jint*)&_state, (jint)NMT_started)) {
 164         _reason = reason;
 165     }
 166   }
 167 }
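
The cmpxchg above guarantees that exactly one caller moves the state from NMT_started to NMT_shutdown_pending, so only the winner records its reason. A sketch of the same first-caller-wins pattern with C++11 atomics (names illustrative):

    #include <atomic>

    enum State { STARTED, SHUTDOWN_PENDING };

    static std::atomic<int> g_state(STARTED);
    static int g_reason = 0;

    void request_shutdown(int reason) {
      int expected = STARTED;
      // only the thread that wins the STARTED -> SHUTDOWN_PENDING
      // transition gets to record why shutdown began
      if (g_state.compare_exchange_strong(expected, SHUTDOWN_PENDING)) {
        g_reason = reason;
      }
    }
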
 168 
 169 // final phase of shutdown
 170 void MemTracker::final_shutdown() {
 171   // delete all pending recorders and pooled recorders
 172   delete_all_pending_recorders();
 173   delete_all_pooled_recorders();
 174 
 175   {
 176     // shared baseline and snapshot are the only objects needed to
 177     // create query results
 178     MutexLockerEx locker(_query_lock, true);
 179     // clean up baseline data and snapshot
 180     _baseline.clear();
 181     MemSnapshot* the_snapshot = _snapshot;
 182     _snapshot = NULL;
 183     delete the_snapshot;
 184   }
 185 
 186   // shut down the shared decoder instance, since it is only
 187   // used by native memory tracking so far.
 188   Decoder::shutdown();
 189 
 190   MemTrackWorker* worker = NULL;
 191   {
 192     ThreadCritical tc;
 193     // cannot delete the worker inside the thread critical section
 194     if (_worker_thread != NULL && Thread::current() == _worker_thread) {
 195       worker = _worker_thread;
 196       _worker_thread = NULL;
 197     }
 198   }
 199   if (worker != NULL) {
 200     delete worker;
 201   }
 202   _state = NMT_final_shutdown;
 203 }
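
Note the ordering change from the old code: the snapshot is copied to a local, the shared _snapshot field is cleared, and only then is the object deleted, so the field never points at freed memory. The pattern in isolation (names illustrative):

    struct Snapshot { /* ... */ };

    static Snapshot* g_snapshot = 0;

    void teardown_snapshot() {
      Snapshot* doomed = g_snapshot;  // take a local copy
      g_snapshot = 0;                 // unpublish the shared pointer first
      delete doomed;                  // then free it; g_snapshot never dangles
    }
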


 265 MemRecorder* MemTracker::get_new_or_pooled_instance() {
 266    MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
 267    if (cur_head == NULL) {
 268      MemRecorder* rec = new (std::nothrow)MemRecorder();
 269      if (rec == NULL || rec->out_of_memory()) {
 270        shutdown(NMT_out_of_memory);
 271        if (rec != NULL) {
 272          delete rec;
 273          rec = NULL;
 274        }
 275      }
 276      return rec;
 277    } else {
 278      MemRecorder* next_head = cur_head->next();
 279      if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
 280        (void*)cur_head)) {
 281        return get_new_or_pooled_instance();
 282      }
 283      cur_head->set_next(NULL);
 284      Atomic::dec(&_pooled_recorder_count);
 285      cur_head->set_generation();
 286      return cur_head;
 287   }
 288 }
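
The pooled-recorder list is a lock-free (Treiber) stack: read the head, then CAS it to head->next, retrying on contention. A standalone C++11 sketch of the pop, with Node standing in for MemRecorder; it loops where the code above recurses, and ABA hazards are out of scope for this sketch:

    #include <atomic>

    struct Node { Node* next; };

    static std::atomic<Node*> g_pool_head(nullptr);

    Node* pop_or_null() {
      Node* head = g_pool_head.load();
      // on CAS failure, 'head' is reloaded with the current head; retry
      while (head != nullptr &&
             !g_pool_head.compare_exchange_weak(head, head->next)) {
      }
      return head;  // caller allocates a fresh node when this is nullptr
    }
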
 289 
 290 /*
 291  * retrieve all recorders in the pending queue, and empty the queue
 292  */
 293 MemRecorder* MemTracker::get_pending_recorders() {
 294   MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 295   MemRecorder* null_ptr = NULL;
 296   while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
 297     (void*)cur_head)) {
 298     cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 299   }
 300   NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
 301   return cur_head;
 302 }
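
Draining the pending queue is an atomic swap-to-NULL: the whole list is detached in one step while producers keep enqueueing onto the now-empty queue. With C++11 atomics the retry loop above collapses to a single exchange (names illustrative):

    #include <atomic>

    struct Node { Node* next; };

    static std::atomic<Node*> g_pending_queue(nullptr);

    Node* drain_all() {
      // atomically detach the entire list and leave the queue empty
      return g_pending_queue.exchange(nullptr);
    }
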
 303 
 304 /*
 305  * release a recorder to the recorder pool.


 554 bool MemTracker::baseline() {
 555   MutexLockerEx lock(_query_lock, true);
 556   MemSnapshot* snapshot = get_snapshot();
 557   if (snapshot != NULL) {
 558     return _baseline.baseline(*snapshot, false);
 559   }
 560   return false;
 561 }
 562 
 563 // print memory usage from current snapshot
 564 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 565   MemBaseline  baseline;
 566   MutexLockerEx lock(_query_lock, true);
 567   MemSnapshot* snapshot = get_snapshot();
 568   if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 569     BaselineReporter reporter(out, unit);
 570     reporter.report_baseline(baseline, summary_only);
 571     return true;
 572   }
 573   return false;
 574 }
 575 
 576 // Whitebox API for blocking until the current generation of NMT data has been merged
 577 bool MemTracker::wbtest_wait_for_data_merge() {
 578   MutexLockerEx lock(_query_lock, true);
 579   assert(_worker_thread != NULL, "Invalid query");
 580   // the generation at query time; NMT will spin until this generation has been processed
 581   unsigned long generation_at_query_time = SequenceGenerator::current_generation();
 582   unsigned long current_processing_generation = _processing_generation;
 583   // whether the generation counter has overflowed
 584   bool generation_overflown = (generation_at_query_time < current_processing_generation);
 585   long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
 586   // spin
 587   while (!shutdown_in_progress()) {
 588     if (!generation_overflown) {
 589       if (current_processing_generation > generation_at_query_time) {
 590         break;
 591       }
 592     } else {
 593       assert(generations_to_wrap >= 0, "Sanity check");
 594       long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
 595       assert(current_generations_to_wrap >= 0, "Sanity check");
 596       // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
 597       if (current_generations_to_wrap > generations_to_wrap &&
 598           current_processing_generation > generation_at_query_time) {
 599         break;
 600       } 
 601     }
 602 
 603     // if the worker thread is idle but the generation is not advancing, that means
 604     // there is no safepoint to let NMT advance the generation; force one.
 605     if (_worker_thread_idle) {
 606       VM_ForceSafepoint vfs;
 607       VMThread::execute(&vfs);
 608     }
 609     MemSnapshot* snapshot = get_snapshot();
 610     if (snapshot == NULL) {
 611       return false;
 612     } 
 613     snapshot->wait(1000);
 614     current_processing_generation = _processing_generation;
 615   }
 616   return true;
 617 }
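
The loop above handles wraparound explicitly through generations_to_wrap. An alternative formulation, not what this code does, is serial-number arithmetic: with modular unsigned subtraction, "has the processing generation passed the queried one?" becomes a single comparison that is immune to wrap, as long as the two counters stay less than half the range apart. A sketch:

    #include <climits>

    static bool generation_reached(unsigned long processing, unsigned long queried) {
      // distance from 'queried' to 'processing' modulo ULONG_MAX + 1;
      // a small non-zero distance means processing > queried, even across wrap
      return (processing - queried) - 1UL < ULONG_MAX / 2;
    }
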
 618 
 619 // compare memory usage between current snapshot and baseline
 620 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 621   MutexLockerEx lock(_query_lock, true);
 622   if (_baseline.baselined()) {
 623     MemBaseline baseline;
 624     MemSnapshot* snapshot = get_snapshot();
 625     if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 626       BaselineReporter reporter(out, unit);
 627       reporter.diff_baselines(baseline, _baseline, summary_only);
 628       return true;
 629     }
 630   }
 631   return false;
 632 }
 633 
 634 #ifndef PRODUCT
 635 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
 636   int cur_len = 0;