12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25
26 #include "oops/instanceKlass.hpp"
27 #include "runtime/atomic.hpp"
28 #include "runtime/interfaceSupport.hpp"
29 #include "runtime/mutexLocker.hpp"
30 #include "runtime/safepoint.hpp"
31 #include "runtime/threadCritical.hpp"
32 #include "services/memPtr.hpp"
33 #include "services/memReporter.hpp"
34 #include "services/memTracker.hpp"
35 #include "utilities/decoder.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
// NOTE(review): presumably enabled when NMT runs at detail level so that
// allocation call sites are captured — confirm in the bootstrap code.
38 bool NMT_track_callsite = false;
39
40 // walk all 'known' threads at NMT sync point, and collect their recorders
41 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
42 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
43 if (thread->is_Java_thread()) {
44 JavaThread* javaThread = (JavaThread*)thread;
45 MemRecorder* recorder = javaThread->get_recorder();
46 if (recorder != NULL) {
47 MemTracker::enqueue_pending_recorder(recorder);
48 javaThread->set_recorder(NULL);
49 }
50 }
51 _thread_count ++;
52 }
53
54
// MemTracker static state, shared across all threads. The recorder queues
// below are manipulated with lock-free CAS (see get_new_or_pooled_instance
// and get_pending_recorders), hence the volatile pointers.
55 MemRecorder* MemTracker::_global_recorder = NULL;
56 MemSnapshot* MemTracker::_snapshot = NULL;
57 MemBaseline MemTracker::_baseline;
// Serializes snapshot queries (baseline / print / compare).
58 Mutex* MemTracker::_query_lock = NULL;
// Recorders waiting to be merged into the snapshot at the next sync point.
59 volatile MemRecorder* MemTracker::_merge_pending_queue = NULL;
// Free list of reusable recorders.
60 volatile MemRecorder* MemTracker::_pooled_recorders = NULL;
61 MemTrackWorker* MemTracker::_worker_thread = NULL;
62 int MemTracker::_sync_point_skip_count = 0;
63 MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
64 volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
65 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
// NOTE(review): starts at 255 rather than 0 — presumably a sizing hint used
// before the first sync point recount; confirm against the closure's usage.
66 int MemTracker::_thread_count = 255;
67 volatile jint MemTracker::_pooled_recorder_count = 0;
68 debug_only(intx MemTracker::_main_thread_tid = 0;)
69 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
70
71 void MemTracker::init_tracking_options(const char* option_line) {
72 _tracking_level = NMT_off;
73 if (strcmp(option_line, "=summary") == 0) {
74 _tracking_level = NMT_summary;
75 } else if (strcmp(option_line, "=detail") == 0) {
76 _tracking_level = NMT_detail;
77 } else if (strcmp(option_line, "=off") != 0) {
78 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
79 }
80 }
81
82 // first phase of bootstrapping, when VM is still in single-threaded mode.
83 void MemTracker::bootstrap_single_thread() {
84 if (_tracking_level > NMT_off) {
85 assert(_state == NMT_uninited, "wrong state");
86
87 // NMT is not supported with UseMallocOnly is on. NMT can NOT
// Obtain a MemRecorder: pop the head of the lock-free recorder pool, or
// allocate a fresh one when the pool is empty. Returns NULL (after
// initiating NMT shutdown with NMT_out_of_memory) if allocation fails.
262 MemRecorder* MemTracker::get_new_or_pooled_instance() {
263 MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
264 if (cur_head == NULL) {
// Pool empty: heap-allocate. nothrow placement so an allocation failure
// shuts NMT down gracefully instead of raising a VM error.
265 MemRecorder* rec = new (std::nothrow)MemRecorder();
266 if (rec == NULL || rec->out_of_memory()) {
267 shutdown(NMT_out_of_memory);
268 if (rec != NULL) {
269 delete rec;
270 rec = NULL;
271 }
272 }
273 return rec;
274 } else {
275 MemRecorder* next_head = cur_head->next();
// CAS the pool head to its successor; on contention (another thread won
// the race) retry the whole operation via tail recursion.
276 if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
277 (void*)cur_head)) {
278 return get_new_or_pooled_instance();
279 }
// Successfully popped: detach from the free list and hand it out.
280 cur_head->set_next(NULL);
281 Atomic::dec(&_pooled_recorder_count);
282 debug_only(cur_head->set_generation();)
283 return cur_head;
284 }
285 }
286
287 /*
288 * retrieve all recorders in pending queue, and empty the queue
289 */
290 MemRecorder* MemTracker::get_pending_recorders() {
291 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
292 MemRecorder* null_ptr = NULL;
// Atomically swap the whole queue head with NULL; loop until the head we
// read matches what the CAS saw (i.e. no concurrent enqueue raced us).
293 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
294 (void*)cur_head)) {
295 cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
296 }
// Queue is now empty; reset the debug-only counter to match.
297 NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
298 return cur_head;
299 }
300
301 /*
302 * release a recorder to recorder pool.
550 // baseline current memory snapshot
551 bool MemTracker::baseline() {
552 MutexLockerEx lock(_query_lock, true);
553 MemSnapshot* snapshot = get_snapshot();
554 if (snapshot != NULL) {
555 return _baseline.baseline(*snapshot, false);
556 }
557 return false;
558 }
559
560 // print memory usage from current snapshot
561 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
562 MemBaseline baseline;
563 MutexLockerEx lock(_query_lock, true);
564 MemSnapshot* snapshot = get_snapshot();
565 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
566 BaselineReporter reporter(out, unit);
567 reporter.report_baseline(baseline, summary_only);
568 return true;
569 }
570 return false;
571 }
572
573 // compare memory usage between current snapshot and baseline
574 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
575 MutexLockerEx lock(_query_lock, true);
576 if (_baseline.baselined()) {
577 MemBaseline baseline;
578 MemSnapshot* snapshot = get_snapshot();
579 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
580 BaselineReporter reporter(out, unit);
581 reporter.diff_baselines(baseline, _baseline, summary_only);
582 return true;
583 }
584 }
585 return false;
586 }
587
588 #ifndef PRODUCT
589 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25
26 #include "oops/instanceKlass.hpp"
27 #include "runtime/atomic.hpp"
28 #include "runtime/interfaceSupport.hpp"
29 #include "runtime/mutexLocker.hpp"
30 #include "runtime/safepoint.hpp"
31 #include "runtime/threadCritical.hpp"
32 #include "runtime/vm_operations.hpp"
33 #include "services/memPtr.hpp"
34 #include "services/memReporter.hpp"
35 #include "services/memTracker.hpp"
36 #include "utilities/decoder.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
// NOTE(review): presumably enabled when NMT runs at detail level so that
// allocation call sites are captured — confirm in the bootstrap code.
39 bool NMT_track_callsite = false;
40
41 // walk all 'known' threads at NMT sync point, and collect their recorders
42 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
43 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
44 if (thread->is_Java_thread()) {
45 JavaThread* javaThread = (JavaThread*)thread;
46 MemRecorder* recorder = javaThread->get_recorder();
47 if (recorder != NULL) {
48 MemTracker::enqueue_pending_recorder(recorder);
49 javaThread->set_recorder(NULL);
50 }
51 }
52 _thread_count ++;
53 }
54
55
// MemTracker static state, shared across all threads. The recorder queues
// below are manipulated with lock-free CAS (see get_new_or_pooled_instance
// and get_pending_recorders), hence the volatile pointers.
56 MemRecorder* MemTracker::_global_recorder = NULL;
57 MemSnapshot* MemTracker::_snapshot = NULL;
58 MemBaseline MemTracker::_baseline;
// Serializes snapshot queries (baseline / print / compare / wb-test wait).
59 Mutex* MemTracker::_query_lock = NULL;
// Recorders waiting to be merged into the snapshot at the next sync point.
60 volatile MemRecorder* MemTracker::_merge_pending_queue = NULL;
// Free list of reusable recorders.
61 volatile MemRecorder* MemTracker::_pooled_recorders = NULL;
62 MemTrackWorker* MemTracker::_worker_thread = NULL;
63 int MemTracker::_sync_point_skip_count = 0;
64 MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
65 volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
66 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
// NOTE(review): starts at 255 rather than 0 — presumably a sizing hint used
// before the first sync point recount; confirm against the closure's usage.
67 int MemTracker::_thread_count = 255;
68 volatile jint MemTracker::_pooled_recorder_count = 0;
// Generation currently being processed; polled by wbtest_wait_for_data_merge.
69 volatile unsigned long MemTracker::_processing_generation = 0;
// Set when the worker thread has no work; wbtest_wait_for_data_merge uses it
// to decide whether a safepoint must be forced to advance the generation.
70 volatile bool MemTracker::_worker_thread_idle = false;
71 debug_only(intx MemTracker::_main_thread_tid = 0;)
72 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
73
74 void MemTracker::init_tracking_options(const char* option_line) {
75 _tracking_level = NMT_off;
76 if (strcmp(option_line, "=summary") == 0) {
77 _tracking_level = NMT_summary;
78 } else if (strcmp(option_line, "=detail") == 0) {
79 _tracking_level = NMT_detail;
80 } else if (strcmp(option_line, "=off") != 0) {
81 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
82 }
83 }
84
85 // first phase of bootstrapping, when VM is still in single-threaded mode.
86 void MemTracker::bootstrap_single_thread() {
87 if (_tracking_level > NMT_off) {
88 assert(_state == NMT_uninited, "wrong state");
89
90 // NMT is not supported with UseMallocOnly is on. NMT can NOT
// Obtain a MemRecorder: pop the head of the lock-free recorder pool, or
// allocate a fresh one when the pool is empty. Returns NULL (after
// initiating NMT shutdown with NMT_out_of_memory) if allocation fails.
265 MemRecorder* MemTracker::get_new_or_pooled_instance() {
266 MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
267 if (cur_head == NULL) {
// Pool empty: heap-allocate. nothrow placement so an allocation failure
// shuts NMT down gracefully instead of raising a VM error.
268 MemRecorder* rec = new (std::nothrow)MemRecorder();
269 if (rec == NULL || rec->out_of_memory()) {
270 shutdown(NMT_out_of_memory);
271 if (rec != NULL) {
272 delete rec;
273 rec = NULL;
274 }
275 }
276 return rec;
277 } else {
278 MemRecorder* next_head = cur_head->next();
// CAS the pool head to its successor; on contention (another thread won
// the race) retry the whole operation via tail recursion.
279 if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
280 (void*)cur_head)) {
281 return get_new_or_pooled_instance();
282 }
// Successfully popped: detach from the free list, stamp its generation,
// and hand it out.
283 cur_head->set_next(NULL);
284 Atomic::dec(&_pooled_recorder_count);
285 cur_head->set_generation();
286 return cur_head;
287 }
288 }
289
290 /*
291 * retrieve all recorders in pending queue, and empty the queue
292 */
293 MemRecorder* MemTracker::get_pending_recorders() {
294 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
295 MemRecorder* null_ptr = NULL;
// Atomically swap the whole queue head with NULL; loop until the head we
// read matches what the CAS saw (i.e. no concurrent enqueue raced us).
296 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
297 (void*)cur_head)) {
298 cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
299 }
// Queue is now empty; reset the debug-only counter to match.
300 NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
301 return cur_head;
302 }
303
304 /*
305 * release a recorder to recorder pool.
553 // baseline current memory snapshot
554 bool MemTracker::baseline() {
555 MutexLockerEx lock(_query_lock, true);
556 MemSnapshot* snapshot = get_snapshot();
557 if (snapshot != NULL) {
558 return _baseline.baseline(*snapshot, false);
559 }
560 return false;
561 }
562
563 // print memory usage from current snapshot
564 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
565 MemBaseline baseline;
566 MutexLockerEx lock(_query_lock, true);
567 MemSnapshot* snapshot = get_snapshot();
568 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
569 BaselineReporter reporter(out, unit);
570 reporter.report_baseline(baseline, summary_only);
571 return true;
572 }
573 return false;
574 }
575
576 // Whitebox API for blocking until the current generation of NMT data has been merged
// Spins (under _query_lock) until _processing_generation passes the
// generation observed at entry, forcing a safepoint when the worker thread
// is idle so the generation can advance. Returns false if NMT shuts down
// before the data is merged, or if the snapshot disappears.
577 bool MemTracker::wbtest_wait_for_data_merge() {
578 MutexLockerEx lock(_query_lock, true);
579 assert(_worker_thread != NULL, "Invalid query");
580 // the generation at query time, so NMT will spin till this generation is processed
581 unsigned long generation_at_query_time = SequenceGenerator::current_generation();
582 unsigned long current_processing_generation = _processing_generation;
583 // if generation counter overflown
584 bool generation_overflown = (generation_at_query_time < current_processing_generation);
// NOTE(review): MAX_UNSIGNED_LONG - current_processing_generation is an
// unsigned value stored into a signed long; if it ever exceeds LONG_MAX the
// conversion result is implementation-defined and the >= 0 asserts below
// could fire. Confirm the intended range — an unsigned type may be safer.
585 long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
586 // spin
587 while (!shutdown_in_progress()) {
588 if (!generation_overflown) {
// Common case: no wrap-around; done once processing passes our generation.
589 if (current_processing_generation > generation_at_query_time) {
590 return true;
591 }
592 } else {
// Wrap-around case: the distance to the wrap point shrinking means the
// processing generation itself wrapped past us.
593 assert(generations_to_wrap >= 0, "Sanity check");
594 long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
595 assert(current_generations_to_wrap >= 0, "Sanity check");
596 // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
597 if (current_generations_to_wrap > generations_to_wrap &&
598 current_processing_generation > generation_at_query_time) {
599 return true;
600 }
601 }
602
603 // if worker thread is idle, but generation is not advancing, that means
604 // there is not safepoint to let NMT advance generation, force one.
605 if (_worker_thread_idle) {
606 VM_ForceSafepoint vfs;
607 VMThread::execute(&vfs);
608 }
609 MemSnapshot* snapshot = get_snapshot();
610 if (snapshot == NULL) {
// Snapshot gone (e.g. NMT shut down): give up rather than spin forever.
611 return false;
612 }
// Block up to 1s for the worker to make progress, then re-read the
// processing generation and re-test.
613 snapshot->wait(1000);
614 current_processing_generation = _processing_generation;
615 }
616 // We end up here if NMT is shutting down before our data has been merged
617 return false;
618 }
619
620 // compare memory usage between current snapshot and baseline
621 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
622 MutexLockerEx lock(_query_lock, true);
623 if (_baseline.baselined()) {
624 MemBaseline baseline;
625 MemSnapshot* snapshot = get_snapshot();
626 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
627 BaselineReporter reporter(out, unit);
628 reporter.diff_baselines(baseline, _baseline, summary_only);
629 return true;
630 }
631 }
632 return false;
633 }
634
635 #ifndef PRODUCT
636 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
|