  _worker_thread->start();
  return true;
}

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
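// The recorder is handed off to the pending queue below; the worker thread
// later merges it into the global snapshot, so the data survives the
// thread's exit.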
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

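// A baseline records the usage in the current snapshot so that a later
// compare_memory_usage() call can report the delta against it.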
// baseline current memory snapshot
bool MemTracker::baseline() {
  MutexLockerEx lock(_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline baseline;
  MutexLockerEx lock(_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}

// Whitebox API for blocking until the current generation of NMT data has been merged
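// Returns true once all data recorded before the call has been merged into
// the global snapshot, false if NMT shuts down (or the snapshot becomes
// unavailable) before the merge completes.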
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shut down while we're holding _query_lock
  MutexLockerEx lock(_query_lock, true);
  assert(_worker_thread != NULL, "Invalid query");
  // capture the generation at query time; NMT will spin until this
  // generation has been processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // has the generation counter already overflowed?
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
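  // Overflow illustration (with a counter that wraps at 100 for readability):
  // suppose the worker is processing generation 98 when the allocator's
  // counter has already wrapped and the query lands on generation 2. Then
  // generation_at_query_time (2) < current_processing_generation (98), so
  // generation_overflown is true and generations_to_wrap is 100 - 98 = 2.
  // Once the worker wraps as well, its distance to the wrap point jumps back
  // up (e.g. 100 - 1 = 99 > 2), and as soon as it also passes generation 2
  // the queried data has been merged.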
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if the worker thread is idle but the generation is not advancing,
    // there is no safepoint to let NMT advance the generation, so force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      return false;
    }
    // wait up to 1000 ms for the worker thread to make progress
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}

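// Requires a prior successful baseline() call; returns false otherwise.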
// compare memory usage between current snapshot and baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLockerEx lock(_query_lock, true);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
// Walk the caller's native stack, appending resolved frame names to buf.
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}