2761 workers()->print_worker_threads_on(st);
2762 _cmThread->print_on(st);
2763 st->cr();
2764 _cm->print_worker_threads_on(st);
2765 _cg1r->print_worker_threads_on(st);
2766 if (G1StringDedup::is_enabled()) {
2767 G1StringDedup::print_worker_threads_on(st);
2768 }
2769 }
2770
// Applies the given ThreadClosure to every GC-related thread this heap
// owns: the parallel GC worker threads, the concurrent-mark thread, the
// concurrent-refinement threads, and — when string deduplication is
// enabled — the dedup threads.
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  workers()->threads_do(tc);
  tc->do_thread(_cmThread);
  _cg1r->threads_do(tc);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }
}
2779
// Prints GC tracing/summary statistics for the G1 components.
void G1CollectedHeap::print_tracing_info() const {
  // We'll overload this to mean "trace GC pause statistics."
  if (TraceYoungGenTime || TraceOldGenTime) {
    // The "G1CollectorPolicy" is keeping track of these stats, so delegate
    // to that.
    g1_policy()->print_tracing_info();
  }
  g1_rem_set()->print_summary_info();       // remembered-set summary
  concurrent_mark()->print_summary_info();  // concurrent-marking summary
  g1_policy()->print_yg_surv_rate_info();   // young-gen survivor-rate stats
}
2791
2792 #ifndef PRODUCT
2793 // Helpful for debugging RSet issues.
2794
2795 class PrintRSetsClosure : public HeapRegionClosure {
2796 private:
2797 const char* _msg;
2798 size_t _occupied_sum;
2799
2800 public:
2801 bool doHeapRegion(HeapRegion* r) {
2802 HeapRegionRemSet* hrrs = r->rem_set();
2803 size_t occupied = hrrs->occupied();
2804 _occupied_sum += occupied;
2805
2806 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2892 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2893 // is set.
2894 #if defined(COMPILER2) || INCLUDE_JVMCI
2895 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2896 #endif
2897 // always_do_update_barrier = true;
2898
2899 resize_all_tlabs();
2900 allocation_context_stats().update(full);
2901
2902 // We have just completed a GC. Update the soft reference
2903 // policy with the new heap occupancy
2904 Universe::update_heap_info_at_gc();
2905 }
2906
2907 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2908 uint gc_count_before,
2909 bool* succeeded,
2910 GCCause::Cause gc_cause) {
2911 assert_heap_not_locked_and_not_at_safepoint();
2912 g1_policy()->record_stop_world_start();
2913 VM_G1IncCollectionPause op(gc_count_before,
2914 word_size,
2915 false, /* should_initiate_conc_mark */
2916 g1_policy()->max_pause_time_ms(),
2917 gc_cause);
2918
2919 op.set_allocation_context(AllocationContext::current());
2920 VMThread::execute(&op);
2921
2922 HeapWord* result = op.result();
2923 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2924 assert(result == NULL || ret_succeeded,
2925 "the result should be NULL if the VM did not succeed");
2926 *succeeded = ret_succeeded;
2927
2928 assert_heap_not_locked();
2929 return result;
2930 }
2931
2932 void
|
2761 workers()->print_worker_threads_on(st);
2762 _cmThread->print_on(st);
2763 st->cr();
2764 _cm->print_worker_threads_on(st);
2765 _cg1r->print_worker_threads_on(st);
2766 if (G1StringDedup::is_enabled()) {
2767 G1StringDedup::print_worker_threads_on(st);
2768 }
2769 }
2770
// Iterates over all threads participating in GC work — worker threads,
// the concurrent-mark thread, concurrent-refinement threads, and (if
// string deduplication is enabled) the dedup threads — invoking the
// closure on each.
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  workers()->threads_do(tc);
  tc->do_thread(_cmThread);
  _cg1r->threads_do(tc);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }
}
2779
// Prints summary statistics for the G1 components that track them:
// the remembered set, concurrent marking, and the policy's young-gen
// survivor-rate data.
void G1CollectedHeap::print_tracing_info() const {
  g1_rem_set()->print_summary_info();
  concurrent_mark()->print_summary_info();
  g1_policy()->print_yg_surv_rate_info();
}
2785
2786 #ifndef PRODUCT
2787 // Helpful for debugging RSet issues.
2788
2789 class PrintRSetsClosure : public HeapRegionClosure {
2790 private:
2791 const char* _msg;
2792 size_t _occupied_sum;
2793
2794 public:
2795 bool doHeapRegion(HeapRegion* r) {
2796 HeapRegionRemSet* hrrs = r->rem_set();
2797 size_t occupied = hrrs->occupied();
2798 _occupied_sum += occupied;
2799
2800 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2886 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2887 // is set.
2888 #if defined(COMPILER2) || INCLUDE_JVMCI
2889 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2890 #endif
2891 // always_do_update_barrier = true;
2892
2893 resize_all_tlabs();
2894 allocation_context_stats().update(full);
2895
2896 // We have just completed a GC. Update the soft reference
2897 // policy with the new heap occupancy
2898 Universe::update_heap_info_at_gc();
2899 }
2900
2901 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2902 uint gc_count_before,
2903 bool* succeeded,
2904 GCCause::Cause gc_cause) {
2905 assert_heap_not_locked_and_not_at_safepoint();
2906 VM_G1IncCollectionPause op(gc_count_before,
2907 word_size,
2908 false, /* should_initiate_conc_mark */
2909 g1_policy()->max_pause_time_ms(),
2910 gc_cause);
2911
2912 op.set_allocation_context(AllocationContext::current());
2913 VMThread::execute(&op);
2914
2915 HeapWord* result = op.result();
2916 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2917 assert(result == NULL || ret_succeeded,
2918 "the result should be NULL if the VM did not succeed");
2919 *succeeded = ret_succeeded;
2920
2921 assert_heap_not_locked();
2922 return result;
2923 }
2924
2925 void
|