src/hotspot/share/gc/g1/g1CollectedHeap.cpp

   1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.


2761     }
2762   } count_from_threads;
2763   Threads::threads_do(&count_from_threads);
2764 
2765   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2766   dcqs.verify_num_cards();
2767 
2768   return dcqs.num_cards() + count_from_threads._cards;
2769 }
2770 
2771 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2772   // We don't nominate objects with many remembered set entries, on
2773   // the assumption that such objects are likely still live.
2774   HeapRegionRemSet* rem_set = r->rem_set();
2775 
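       // Note (annotation, not in the original source): with
       // G1EagerReclaimHumongousObjectsWithStaleRefs we tolerate a few stale
       // entries (occupancy up to G1RSetSparseRegionEntries); otherwise the
       // remembered set must be completely empty for the region to qualify.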
2776   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2777          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2778          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2779 }
2780 
2781 class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
2782  private:
2783   size_t _total_humongous;
2784   size_t _candidate_humongous;
2785 
2786   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2787     assert(region->is_starts_humongous(), "Must start a humongous object");
2788 
2789     oop obj = oop(region->bottom());
2790 
2791     // Dead objects cannot be eager reclaim candidates. Due to class
2792     // unloading it is unsafe to query their classes so we return early.
2793     if (g1h->is_obj_dead(obj, region)) {
2794       return false;
2795     }
2796 
2797     // If we do not have a complete remembered set for the region, then we
2798     // cannot be sure that we have all references to it.
2799     if (!region->rem_set()->is_complete()) {
2800       return false;
2801     }
2802     // Candidate selection must satisfy the following constraints
2803     // while concurrent marking is in progress:
2804     //
2805     // * In order to maintain SATB invariants, an object must not be
2806     // reclaimed if it was allocated before the start of marking and
2807     // has not had its references scanned.  Such an object must have
2808     // its references (including type metadata) scanned to ensure no
2809     // live objects are missed by the marking process.  Objects
2810     // allocated after the start of concurrent marking don't need to
2811     // be scanned.
2812     //
2813     // * An object must not be reclaimed if it is on the concurrent
2814     // mark stack.  Objects allocated after the start of concurrent
2815     // marking are never pushed on the mark stack.
2816     //
2817     // Nominating only objects allocated after the start of concurrent
2818     // marking is sufficient to meet both constraints.  This may miss
2819     // some objects that satisfy the constraints, but the marking data
2820     // structures don't support efficiently performing the needed
2821     // additional tests or scrubbing of the mark stack.
2822     //
2823     // However, we presently only nominate is_typeArray() objects.
2824     // A humongous object containing references induces remembered
2825     // set entries on other regions.  In order to reclaim such an
2826     // object, those remembered sets would need to be cleaned up.
2827     //
2828     // We also treat is_typeArray() objects specially, allowing them
2829     // to be reclaimed even if allocated before the start of
2830     // concurrent mark.  For this we rely on mark stack insertion to
2831     // exclude is_typeArray() objects, preventing reclaiming an object
2832     // that is in the mark stack.  We also rely on the metadata for
2833     // such objects being built-in, and hence guaranteed to be kept live.
2834     // Frequent allocation and drop of large binary blobs is an
2835     // important use case for eager reclaim, and this special handling
2836     // may reduce needed headroom.
2837 
2838     return obj->is_typeArray() &&
2839            g1h->is_potential_eager_reclaim_candidate(region);
2840   }
2841 
2842  public:
2843   RegisterRegionsWithRegionAttrTableClosure()
2844   : _total_humongous(0),
2845     _candidate_humongous(0) {
2846   }
2847 
2848   virtual bool do_heap_region(HeapRegion* r) {
2849     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2850 
2851     if (!r->is_starts_humongous()) {
2852       g1h->register_region_with_region_attr(r);
2853       return false;
2854     }
2855 
2856     bool is_candidate = humongous_region_is_candidate(g1h, r);
2857     uint rindex = r->hrm_index();
2858     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2859     if (is_candidate) {
2860       g1h->register_humongous_region_with_region_attr(rindex);
2861       _candidate_humongous++;
2862       // We will later handle the remembered sets of these regions.
2863     } else {
2864       g1h->register_region_with_region_attr(r);
2865     }
2866     _total_humongous++;
2867 
2868     return false;
2869   }
2870 
2871   size_t total_humongous() const { return _total_humongous; }
2872   size_t candidate_humongous() const { return _candidate_humongous; }
2873 };
2874 
2875 void G1CollectedHeap::register_regions_with_region_attr() {
2876   Ticks start = Ticks::now();
2877 
2878   RegisterRegionsWithRegionAttrTableClosure cl;
2879   heap_region_iterate(&cl);
2880 
2881   phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0,
2882                                          cl.total_humongous(),
2883                                          cl.candidate_humongous());
2884   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2885 }
2886 
2887 #ifndef PRODUCT
2888 void G1CollectedHeap::verify_region_attr_remset_update() {
2889   class VerifyRegionAttrRemSet : public HeapRegionClosure {
2890   public:
2891     virtual bool do_heap_region(HeapRegion* r) {
2892       G1CollectedHeap* g1h = G1CollectedHeap::heap();
2893       bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
2894       assert(r->rem_set()->is_tracked() == needs_remset_update,
2895              "Region %u remset tracking status (%s) different from region attribute (%s)",
2896              r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2897       return false;
2898     }
2899   } cl;
2900   heap_region_iterate(&cl);
2901 }
2902 #endif
2903 
2904 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2905   public:
2906     bool do_heap_region(HeapRegion* hr) {


3684   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3685   phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3686 }
3687 
3688 void G1CollectedHeap::make_pending_list_reachable() {
3689   if (collector_state()->in_initial_mark_gc()) {
3690     oop pll_head = Universe::reference_pending_list();
3691     if (pll_head != NULL) {
3692       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3693       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3694     }
3695   }
3696 }
3697 
3698 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3699   double merge_pss_time_start = os::elapsedTime();
3700   per_thread_states->flush();
3701   phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3702 }
3703 
3704 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3705   _expand_heap_after_alloc_failure = true;
3706   _evacuation_failed = false;
3707 
3708   // Disable the hot card cache.
3709   _hot_card_cache->reset_hot_cache_claimed_index();
3710   _hot_card_cache->set_use_cache(false);
3711 
3712   // Initialize the GC alloc regions.
3713   _allocator->init_gc_alloc_regions(evacuation_info);
3714 
3715   {
3716     Ticks start = Ticks::now();
3717     rem_set()->prepare_for_scan_heap_roots();
3718     phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3719   }
3720 
3721   register_regions_with_region_attr();
3722   assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3723 
3724   _preserved_marks_set.assert_empty();
3725 
3726 #if COMPILER2_OR_JVMCI
3727   DerivedPointerTable::clear();
3728 #endif
3729 
3730   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3731   if (collector_state()->in_initial_mark_gc()) {
3732     concurrent_mark()->pre_initial_mark();
3733 
3734     double start_clear_claimed_marks = os::elapsedTime();
3735 
3736     ClassLoaderDataGraph::clear_claimed_marks();
3737 
3738     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3739     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3740   }
3741 
3742   // Should G1EvacuationFailureALot be in effect for this GC?
3743   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)


   1  /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.


2761     }
2762   } count_from_threads;
2763   Threads::threads_do(&count_from_threads);
2764 
2765   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2766   dcqs.verify_num_cards();
2767 
2768   return dcqs.num_cards() + count_from_threads._cards;
2769 }
2770 
2771 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2772   // We don't nominate objects with many remembered set entries, on
2773   // the assumption that such objects are likely still live.
2774   HeapRegionRemSet* rem_set = r->rem_set();
2775 
2776   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2777          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2778          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2779 }
2780 
2781 #ifndef PRODUCT
2782 void G1CollectedHeap::verify_region_attr_remset_update() {
2783   class VerifyRegionAttrRemSet : public HeapRegionClosure {
2784   public:
2785     virtual bool do_heap_region(HeapRegion* r) {
2786       G1CollectedHeap* g1h = G1CollectedHeap::heap();
2787       bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
2788       assert(r->rem_set()->is_tracked() == needs_remset_update,
2789              "Region %u remset tracking status (%s) different from region attribute (%s)",
2790              r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2791       return false;
2792     }
2793   } cl;
2794   heap_region_iterate(&cl);
2795 }
2796 #endif
2797 
2798 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2799   public:
2800     bool do_heap_region(HeapRegion* hr) {


3578   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3579   phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3580 }
3581 
3582 void G1CollectedHeap::make_pending_list_reachable() {
3583   if (collector_state()->in_initial_mark_gc()) {
3584     oop pll_head = Universe::reference_pending_list();
3585     if (pll_head != NULL) {
3586       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3587       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3588     }
3589   }
3590 }
3591 
3592 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3593   double merge_pss_time_start = os::elapsedTime();
3594   per_thread_states->flush();
3595   phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3596 }
3597 
3598 class G1PrepareEvacuationTask : public AbstractGangTask {
3599   class G1PrepareRegionsClosure : public HeapRegionClosure {
3600     G1CollectedHeap* _g1h;
3601     G1PrepareEvacuationTask* _parent_task;
3602     size_t _worker_humongous_total;
3603     size_t _worker_humongous_candidates;
3604 
3605     bool humongous_region_is_candidate(HeapRegion* region) const {
3606       assert(region->is_starts_humongous(), "Must start a humongous object");
3607 
3608       oop obj = oop(region->bottom());
3609 
3610       // Dead objects cannot be eager reclaim candidates. Due to class
3611       // unloading it is unsafe to query their classes so we return early.
3612       if (_g1h->is_obj_dead(obj, region)) {
3613         return false;
3614       }
3615 
3616       // If we do not have a complete remembered set for the region, then we
3617       // cannot be sure that we have all references to it.
3618       if (!region->rem_set()->is_complete()) {
3619         return false;
3620       }
3621       // Candidate selection must satisfy the following constraints
3622       // while concurrent marking is in progress:
3623       //
3624       // * In order to maintain SATB invariants, an object must not be
3625       // reclaimed if it was allocated before the start of marking and
3626       // has not had its references scanned.  Such an object must have
3627       // its references (including type metadata) scanned to ensure no
3628       // live objects are missed by the marking process.  Objects
3629       // allocated after the start of concurrent marking don't need to
3630       // be scanned.
3631       //
3632       // * An object must not be reclaimed if it is on the concurrent
3633       // mark stack.  Objects allocated after the start of concurrent
3634       // marking are never pushed on the mark stack.
3635       //
3636       // Nominating only objects allocated after the start of concurrent
3637       // marking is sufficient to meet both constraints.  This may miss
3638       // some objects that satisfy the constraints, but the marking data
3639       // structures don't support efficiently performing the needed
3640       // additional tests or scrubbing of the mark stack.
3641       //
3642       // However, we presently only nominate is_typeArray() objects.
3643       // A humongous object containing references induces remembered
3644       // set entries on other regions.  In order to reclaim such an
3645       // object, those remembered sets would need to be cleaned up.
3646       //
3647       // We also treat is_typeArray() objects specially, allowing them
3648       // to be reclaimed even if allocated before the start of
3649       // concurrent mark.  For this we rely on mark stack insertion to
3650       // exclude is_typeArray() objects, preventing reclaiming an object
3651       // that is in the mark stack.  We also rely on the metadata for
3652       // such objects being built-in, and hence guaranteed to be kept live.
3653       // Frequent allocation and drop of large binary blobs is an
3654       // important use case for eager reclaim, and this special handling
3655       // may reduce needed headroom.
3656 
3657       return obj->is_typeArray() &&
3658              _g1h->is_potential_eager_reclaim_candidate(region);
3659     }
3660 
3661   public:
3662     G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
3663       _g1h(g1h),
3664       _parent_task(parent_task),
3665       _worker_humongous_total(0),
3666       _worker_humongous_candidates(0) { }
3667 
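         // Annotation: the closure lives on a worker's stack for the duration
         // of work(); its destructor pushes the per-worker tallies to the
         // parent task exactly once, when the worker is done iterating.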
3668     ~G1PrepareRegionsClosure() {
3669       _parent_task->add_humongous_candidates(_worker_humongous_candidates);
3670       _parent_task->add_humongous_total(_worker_humongous_total);
3671     }
3672 
3673     virtual bool do_heap_region(HeapRegion* hr) {
3674       // First prepare the region for scanning
3675       _g1h->rem_set()->prepare_region_for_scan(hr);
3676 
3677       // Now check if the region is a humongous candidate
3678       if (!hr->is_starts_humongous()) {
3679         _g1h->register_region_with_region_attr(hr);
3680         return false;
3681       }
3682 
3683       uint index = hr->hrm_index();
3684       if (humongous_region_is_candidate(hr)) {
3685         _g1h->set_humongous_reclaim_candidate(index, true);
3686         _g1h->register_humongous_region_with_region_attr(index);
3687         _worker_humongous_candidates++;
3688         // We will later handle the remembered sets of these regions.
3689       } else {
3690         _g1h->set_humongous_reclaim_candidate(index, false);
3691         _g1h->register_region_with_region_attr(hr);
3692       }
3693       _worker_humongous_total++;
3694 
3695       return false;
3696     }
3697   };
3698 
3699   G1CollectedHeap* _g1h;
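       // Annotation: the claimer hands out disjoint subsets of regions to the
       // workers via heap_region_par_iterate_from_worker_offset(), so no
       // region is prepared twice.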
3700   HeapRegionClaimer _claimer;
3701   volatile size_t _humongous_total;
3702   volatile size_t _humongous_candidates;
3703 public:
3704   G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
3705     AbstractGangTask("Prepare Evacuation"),
3706     _g1h(g1h),
3707     _claimer(_g1h->workers()->active_workers()),
3708     _humongous_total(0),
3709     _humongous_candidates(0) { }
3710 
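       // Annotation: the task object is destroyed only after run_task() has
       // returned, i.e. after every worker has flushed its counts, so the
       // reclaim-candidate flag below is based on the final totals.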
3711   ~G1PrepareEvacuationTask() {
3712     _g1h->set_has_humongous_reclaim_candidate(_humongous_candidates > 0);
3713   }
3714 
3715   void work(uint worker_id) {
3716     G1PrepareRegionsClosure cl(_g1h, this);
3717     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
3718   }
3719 
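       // Annotation: multiple workers merge their tallies concurrently;
       // Atomic::add keeps the accumulation lock-free.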
3720   void add_humongous_candidates(size_t candidates) {
3721     Atomic::add(candidates, &_humongous_candidates);
3722   }
3723 
3724   void add_humongous_total(size_t total) {
3725     Atomic::add(total, &_humongous_total);
3726   }
3727 
3728   size_t humongous_candidates() {
3729     return _humongous_candidates;
3730   }
3731 
3732   size_t humongous_total() {
3733     return _humongous_total;
3734   }
3735 };
3736 
3737 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3738   _expand_heap_after_alloc_failure = true;
3739   _evacuation_failed = false;
3740 
3741   // Disable the hot card cache.
3742   _hot_card_cache->reset_hot_cache_claimed_index();
3743   _hot_card_cache->set_use_cache(false);
3744 
3745   // Initialize the GC alloc regions.
3746   _allocator->init_gc_alloc_regions(evacuation_info);
3747 
3748   {
3749     Ticks start = Ticks::now();
3750     rem_set()->prepare_for_scan_heap_roots();
3751     phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3752   }
3753 
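       // Annotation: region preparation and humongous candidate selection now
       // run in parallel on the work gang; the elapsed task time is recorded
       // under the register-regions phase, as before.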
3754   {
3755     G1PrepareEvacuationTask g1_prep_task(this);
3756     Tickspan task_time = run_task(&g1_prep_task);
3757 
3758     phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3759                                            g1_prep_task.humongous_total(),
3760                                            g1_prep_task.humongous_candidates());
3761   }
3762 
3763   assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3764   _preserved_marks_set.assert_empty();
3765 
3766 #if COMPILER2_OR_JVMCI
3767   DerivedPointerTable::clear();
3768 #endif
3769 
3770   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3771   if (collector_state()->in_initial_mark_gc()) {
3772     concurrent_mark()->pre_initial_mark();
3773 
3774     double start_clear_claimed_marks = os::elapsedTime();
3775 
3776     ClassLoaderDataGraph::clear_claimed_marks();
3777 
3778     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3779     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3780   }
3781 
3782   // Should G1EvacuationFailureALot be in effect for this GC?
3783   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

