src/hotspot/share/gc/g1/g1CollectedHeap.cpp

  size_t candidate_humongous() const { return _candidate_humongous; }

  void flush_rem_set_entries() { _dcq.flush(); }
};

void G1CollectedHeap::register_regions_with_region_attr() {
  Ticks start = Ticks::now();

  RegisterRegionsWithRegionAttrTableClosure cl;
  heap_region_iterate(&cl);

  phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0,
                                         cl.total_humongous(),
                                         cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.
  cl.flush_rem_set_entries();
}

#ifndef PRODUCT
void G1CollectedHeap::verify_region_attr_remset_update() {
  class VerifyRegionAttrRemSet : public HeapRegionClosure {
  public:
    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
      assert(r->rem_set()->is_tracked() == needs_remset_update,
             "Region %u remset tracking status (%s) different to region attribute (%s)",
             r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
      return false;
    }
  } cl;
  heap_region_iterate(&cl);
}
#endif
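
verify_region_attr_remset_update() above is compiled only into non-product
builds. A minimal call-site sketch, assuming a debug-only hook once the region
attribute table has been repopulated; the placement and guard are illustrative,
not part of this change:

  // Sketch only: hypothetical check, e.g. after register_regions_with_region_attr()
  // has run, mirroring the #ifndef PRODUCT guard around the function itself.
  #ifndef PRODUCT
    verify_region_attr_remset_update();
  #endif
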
class VerifyRegionRemSetClosure : public HeapRegionClosure {
  public:
    bool do_heap_region(HeapRegion* hr) {
      if (!hr->is_archive() && !hr->is_continues_humongous()) {
        hr->verify_rem_set();
      }
      return false;
    }
};
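
Like the closures above, VerifyRegionRemSetClosure is stack-allocated and driven
through heap_region_iterate(). A sketch of a hypothetical call site; the
VerifyRememberedSets guard is an assumption, not shown in this excerpt:

  // Sketch only: verify the remembered set of every region; the closure itself
  // skips archive and continues-humongous regions.
  if (VerifyRememberedSets) {
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
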
uint G1CollectedHeap::num_task_queues() const {
  return _task_queues->size();
}

#if TASKQUEUE_STATS
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
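
A minimal usage sketch for the header printer, assuming a TASKQUEUE_STATS build
and HotSpot's global tty output stream; the call site is illustrative only:

  // Sketch only: emit the "GC Task Stats" banner and the two header rows
  // before printing one row of per-thread queue statistics per worker.
  #if TASKQUEUE_STATS
    print_taskqueue_stats_hdr(tty);
  #endif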


// ... (intervening code omitted) ...

        // We want to temporarily turn off discovery by the
        // CM ref processor, if necessary, and turn it back
        // on again later if we do. Using a scoped
        // NoRefDiscovery object will do this.
        NoRefDiscovery no_cm_discovery(_ref_processor_cm);

        policy()->record_collection_pause_start(sample_start_time_sec);

        // Forget the current allocation region (we might even choose it to be part
        // of the collection set!).
        _allocator->release_mutator_alloc_region();

        calculate_collection_set(evacuation_info, target_pause_time_ms);

        G1ParScanThreadStateSet per_thread_states(this,
                                                  workers()->active_workers(),
                                                  collection_set()->young_region_length(),
                                                  collection_set()->optional_region_length());
        pre_evacuate_collection_set(evacuation_info);

        // Actually do the work...
        evacuate_initial_collection_set(&per_thread_states);

        if (_collection_set.optional_region_length() != 0) {
          evacuate_optional_collection_set(&per_thread_states);
        }
        post_evacuate_collection_set(evacuation_info, &per_thread_states);

        start_new_collection_set();

        _survivor_evac_stats.adjust_desired_plab_sz();
        _old_evac_stats.adjust_desired_plab_sz();

        if (should_start_conc_mark) {
          // We have to do this before we notify the CM threads that
          // they can start working to make sure that all the
          // appropriate initialization is done on the CM object.
          concurrent_mark()->post_initial_mark();
          // Note that we don't actually trigger the CM thread at
          // this point. We do that later when we're sure that
          // the current thread has completed its logging output.
        }

