
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

   1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.


2760     }
2761   } count_from_threads;
2762   Threads::threads_do(&count_from_threads);
2763 
2764   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2765   dcqs.verify_num_cards();
2766 
2767   return dcqs.num_cards() + count_from_threads._cards;
2768 }
2769 
2770 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2771   // We don't nominate objects with many remembered set entries, on
2772   // the assumption that such objects are likely still live.
2773   HeapRegionRemSet* rem_set = r->rem_set();
2774 
2775   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2776          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2777          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2778 }
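
For reference, the ternary above collapses to a small decision table: with -XX:+G1EagerReclaimHumongousObjectsWithStaleRefs a few stale remembered-set entries are tolerated, otherwise the remembered set must be empty. A standalone sketch of that logic (the threshold value is illustrative, not the actual G1RSetSparseRegionEntries default):

    #include <cstddef>

    // Sketch of is_potential_eager_reclaim_candidate() above.
    // Flag variables mirror the G1 switches; values are illustrative.
    const bool   eager_reclaim                 = true; // G1EagerReclaimHumongousObjects
    const bool   eager_reclaim_with_stale_refs = true; // ...WithStaleRefs
    const size_t sparse_region_entries         = 4;    // stand-in for G1RSetSparseRegionEntries

    bool is_potential_candidate(size_t remset_occupied) {
      return eager_reclaim_with_stale_refs
           ? remset_occupied <= sparse_region_entries   // tolerate a few stale refs
           : (eager_reclaim && remset_occupied == 0);   // require an empty remset
    }
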
2779 
2780 class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
2781  private:
2782   size_t _total_humongous;
2783   size_t _candidate_humongous;
2784 
2785   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2786     assert(region->is_starts_humongous(), "Must start a humongous object");
2787 
2788     oop obj = oop(region->bottom());
2789 
2790     // Dead objects cannot be eager reclaim candidates. Due to class
2791     // unloading it is unsafe to query their classes so we return early.
2792     if (g1h->is_obj_dead(obj, region)) {
2793       return false;
2794     }
2795 
2796     // If we do not have a complete remembered set for the region, then we can
2797     // not be sure that we have all references to it.
2798     if (!region->rem_set()->is_complete()) {
2799       return false;
2800     }
2801     // Candidate selection must satisfy the following constraints
2802     // while concurrent marking is in progress:
2803     //
2804     // * In order to maintain SATB invariants, an object must not be
2805     // reclaimed if it was allocated before the start of marking and
2806     // has not had its references scanned.  Such an object must have
2807     // its references (including type metadata) scanned to ensure no
2808     // live objects are missed by the marking process.  Objects
2809     // allocated after the start of concurrent marking don't need to
2810     // be scanned.
2811     //
2812     // * An object must not be reclaimed if it is on the concurrent
2813     // mark stack.  Objects allocated after the start of concurrent
2814     // marking are never pushed on the mark stack.
2815     //
2816     // Nominating only objects allocated after the start of concurrent
2817     // marking is sufficient to meet both constraints.  This may miss
2818     // some objects that satisfy the constraints, but the marking data
2819     // structures don't support efficiently performing the needed
2820     // additional tests or scrubbing of the mark stack.
2821     //
2822     // However, we presently only nominate is_typeArray() objects.
2823     // A humongous object containing references induces remembered
2824     // set entries on other regions.  In order to reclaim such an
2825     // object, those remembered sets would need to be cleaned up.
2826     //
2827     // We also treat is_typeArray() objects specially, allowing them
2828     // to be reclaimed even if allocated before the start of
2829     // concurrent mark.  For this we rely on mark stack insertion to
2830     // exclude is_typeArray() objects, preventing reclaiming an object
2831     // that is in the mark stack.  We also rely on the metadata for
2832     // such objects to be built-in and so ensured to be kept live.
2833     // Frequent allocation and drop of large binary blobs is an
2834     // important use case for eager reclaim, and this special handling
2835     // may reduce needed headroom.
2836 
2837     return obj->is_typeArray() &&
2838            g1h->is_potential_eager_reclaim_candidate(region);
2839   }
2840 
2841  public:
2842   RegisterRegionsWithRegionAttrTableClosure()
2843   : _total_humongous(0),
2844     _candidate_humongous(0) {
2845   }
2846 
2847   virtual bool do_heap_region(HeapRegion* r) {
2848     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2849 
2850     if (!r->is_starts_humongous()) {
2851       g1h->register_region_with_region_attr(r);
2852       return false;
2853     }
2854 
2855     bool is_candidate = humongous_region_is_candidate(g1h, r);
2856     uint rindex = r->hrm_index();
2857     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2858     if (is_candidate) {
2859       g1h->register_humongous_region_with_region_attr(rindex);
2860       _candidate_humongous++;
2861       // We will later handle the remembered sets of these regions.
2862     } else {
2863       g1h->register_region_with_region_attr(r);
2864     }
2865     _total_humongous++;
2866 
2867     return false;
2868   }
2869 
2870   size_t total_humongous() const { return _total_humongous; }
2871   size_t candidate_humongous() const { return _candidate_humongous; }
2872 };
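
The SATB reasoning in humongous_region_is_candidate() above hinges on whether an object was allocated after marking started. In G1 that test reduces to an address comparison against the region's top-at-mark-start (TAMS); a schematic, self-contained sketch of the idea (types and names are stand-ins, not the HotSpot API):

    #include <cstdint>

    // Stand-in for HotSpot's HeapWord*; addresses within one heap region.
    typedef uintptr_t HeapAddr;

    // An object at or above the region's top-at-mark-start (TAMS) was
    // allocated after concurrent marking began, so SATB never requires
    // it to be scanned, and it is never pushed on the mark stack.
    bool allocated_since_mark_start(HeapAddr obj, HeapAddr tams) {
      return obj >= tams;
    }
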
2873 
2874 void G1CollectedHeap::register_regions_with_region_attr() {
2875   Ticks start = Ticks::now();
2876 
2877   RegisterRegionsWithRegionAttrTableClosure cl;
2878   heap_region_iterate(&cl);
2879 
2880   phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0,
2881                                          cl.total_humongous(),
2882                                          cl.candidate_humongous());
2883   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2884 }
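
The timing idiom here, (Ticks::now() - start).seconds() * 1000.0, reports the phase duration in milliseconds. A rough std::chrono equivalent, purely for orientation (not HotSpot code):

    #include <chrono>

    // Equivalent of the Ticks/Tickspan phase-timing idiom, in milliseconds.
    double phase_ms(std::chrono::steady_clock::time_point start) {
      std::chrono::duration<double, std::milli> d =
          std::chrono::steady_clock::now() - start;
      return d.count();
    }
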
2885 
2886 #ifndef PRODUCT
2887 void G1CollectedHeap::verify_region_attr_remset_update() {
2888   class VerifyRegionAttrRemSet : public HeapRegionClosure {
2889   public:
2890     virtual bool do_heap_region(HeapRegion* r) {
2891       G1CollectedHeap* g1h = G1CollectedHeap::heap();
2892       bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
2893       assert(r->rem_set()->is_tracked() == needs_remset_update,
2894              "Region %u remset tracking status (%s) different to region attribute (%s)",
2895              r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2896       return false;
2897     }
2898   } cl;
2899   heap_region_iterate(&cl);
2900 }
2901 #endif
2902 
2903 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2904   public:
2905     bool do_heap_region(HeapRegion* hr) {


3682   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3683   phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3684 }
3685 
3686 void G1CollectedHeap::make_pending_list_reachable() {
3687   if (collector_state()->in_initial_mark_gc()) {
3688     oop pll_head = Universe::reference_pending_list();
3689     if (pll_head != NULL) {
3690       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3691       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3692     }
3693   }
3694 }
3695 
3696 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3697   Ticks start = Ticks::now();
3698   per_thread_states->flush();
3699   phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds());
3700 }
3701 
3702 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3703   _bytes_used_during_gc = 0;
3704 
3705   _expand_heap_after_alloc_failure = true;
3706   _evacuation_failed = false;
3707 
3708   // Disable the hot card cache.
3709   _hot_card_cache->reset_hot_cache_claimed_index();
3710   _hot_card_cache->set_use_cache(false);
3711 
3712   // Initialize the GC alloc regions.
3713   _allocator->init_gc_alloc_regions(evacuation_info);
3714 
3715   {
3716     Ticks start = Ticks::now();
3717     rem_set()->prepare_for_scan_heap_roots();
3718     phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3719   }
3720 
3721   register_regions_with_region_attr();
3722   assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3723
3724   _preserved_marks_set.assert_empty();
3725 
3726 #if COMPILER2_OR_JVMCI
3727   DerivedPointerTable::clear();
3728 #endif
3729 
3730   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3731   if (collector_state()->in_initial_mark_gc()) {
3732     concurrent_mark()->pre_initial_mark();
3733 
3734     double start_clear_claimed_marks = os::elapsedTime();
3735 
3736     ClassLoaderDataGraph::clear_claimed_marks();
3737 
3738     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3739     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3740   }
3741 
3742   // Should G1EvacuationFailureALot be in effect for this GC?
3743   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)


2780 #ifndef PRODUCT
2781 void G1CollectedHeap::verify_region_attr_remset_update() {
2782   class VerifyRegionAttrRemSet : public HeapRegionClosure {
2783   public:
2784     virtual bool do_heap_region(HeapRegion* r) {
2785       G1CollectedHeap* g1h = G1CollectedHeap::heap();
2786       bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
2787       assert(r->rem_set()->is_tracked() == needs_remset_update,
2788              "Region %u remset tracking status (%s) different to region attribute (%s)",
2789              r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2790       return false;
2791     }
2792   } cl;
2793   heap_region_iterate(&cl);
2794 }
2795 #endif
2796 
2797 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2798   public:
2799     bool do_heap_region(HeapRegion* hr) {


3576   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3577   phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3578 }
3579 
3580 void G1CollectedHeap::make_pending_list_reachable() {
3581   if (collector_state()->in_initial_mark_gc()) {
3582     oop pll_head = Universe::reference_pending_list();
3583     if (pll_head != NULL) {
3584       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3585       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3586     }
3587   }
3588 }
3589 
3590 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3591   Ticks start = Ticks::now();
3592   per_thread_states->flush();
3593   phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds());
3594 }
3595 
3596 class G1PrepareEvacuationTask : public AbstractGangTask {
3597   class G1PrepareRegionsClosure : public HeapRegionClosure {
3598     G1CollectedHeap* _g1h;
3599     G1PrepareEvacuationTask* _parent_task;
3600     size_t _worker_humongous_total;
3601     size_t _worker_humongous_candidates;
3602 
3603     bool humongous_region_is_candidate(HeapRegion* region) const {
3604       assert(region->is_starts_humongous(), "Must start a humongous object");
3605 
3606       oop obj = oop(region->bottom());
3607 
3608       // Dead objects cannot be eager reclaim candidates. Due to class
3609       // unloading it is unsafe to query their classes so we return early.
3610       if (_g1h->is_obj_dead(obj, region)) {
3611         return false;
3612       }
3613 
3614       // If we do not have a complete remembered set for the region, then we can
3615       // not be sure that we have all references to it.
3616       if (!region->rem_set()->is_complete()) {
3617         return false;
3618       }
3619       // Candidate selection must satisfy the following constraints
3620       // while concurrent marking is in progress:
3621       //
3622       // * In order to maintain SATB invariants, an object must not be
3623       // reclaimed if it was allocated before the start of marking and
3624       // has not had its references scanned.  Such an object must have
3625       // its references (including type metadata) scanned to ensure no
3626       // live objects are missed by the marking process.  Objects
3627       // allocated after the start of concurrent marking don't need to
3628       // be scanned.
3629       //
3630       // * An object must not be reclaimed if it is on the concurrent
3631       // mark stack.  Objects allocated after the start of concurrent
3632       // marking are never pushed on the mark stack.
3633       //
3634       // Nominating only objects allocated after the start of concurrent
3635       // marking is sufficient to meet both constraints.  This may miss
3636       // some objects that satisfy the constraints, but the marking data
3637       // structures don't support efficiently performing the needed
3638       // additional tests or scrubbing of the mark stack.
3639       //
3640       // However, we presently only nominate is_typeArray() objects.
3641       // A humongous object containing references induces remembered
3642       // set entries on other regions.  In order to reclaim such an
3643       // object, those remembered sets would need to be cleaned up.
3644       //
3645       // We also treat is_typeArray() objects specially, allowing them
3646       // to be reclaimed even if allocated before the start of
3647       // concurrent mark.  For this we rely on mark stack insertion to
3648       // exclude is_typeArray() objects, preventing reclaiming an object
3649       // that is in the mark stack.  We also rely on the metadata for
3650       // such objects to be built-in and so ensured to be kept live.
3651       // Frequent allocation and drop of large binary blobs is an
3652       // important use case for eager reclaim, and this special handling
3653       // may reduce needed headroom.
3654 
3655       return obj->is_typeArray() &&
3656              _g1h->is_potential_eager_reclaim_candidate(region);
3657     }
3658 
3659   public:
3660     G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
3661       _g1h(g1h),
3662       _parent_task(parent_task),
3663       _worker_humongous_total(0),
3664       _worker_humongous_candidates(0) { }
3665 
3666     ~G1PrepareRegionsClosure() {
3667       _parent_task->add_humongous_candidates(_worker_humongous_candidates);
3668       _parent_task->add_humongous_total(_worker_humongous_total);
3669     }
3670 
3671     virtual bool do_heap_region(HeapRegion* hr) {
3672       // First prepare the region for scanning
3673       _g1h->rem_set()->prepare_region_for_scan(hr);
3674 
3675       // Now check if region is a humongous candidate
3676       if (!hr->is_starts_humongous()) {
3677         _g1h->register_region_with_region_attr(hr);
3678         return false;
3679       }
3680 
3681       uint index = hr->hrm_index();
3682       if (humongous_region_is_candidate(hr)) {
3683         _g1h->set_humongous_reclaim_candidate(index, true);
3684         _g1h->register_humongous_region_with_region_attr(index);
3685         _worker_humongous_candidates++;
3686         // We will later handle the remembered sets of these regions.
3687       } else {
3688         _g1h->set_humongous_reclaim_candidate(index, false);
3689         _g1h->register_region_with_region_attr(hr);
3690       }
3691       _worker_humongous_total++;
3692 
3693       return false;
3694     }
3695   };
3696 
3697   G1CollectedHeap* _g1h;
3698   HeapRegionClaimer _claimer;
3699   volatile size_t _humongous_total;
3700   volatile size_t _humongous_candidates;
3701 public:
3702   G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
3703     AbstractGangTask("Prepare Evacuation"),
3704     _g1h(g1h),
3705     _claimer(_g1h->workers()->active_workers()),
3706     _humongous_total(0),
3707     _humongous_candidates(0) { }
3708 
3709   ~G1PrepareEvacuationTask() {
3710     _g1h->set_has_humongous_reclaim_candidate(_humongous_candidates > 0);
3711   }
3712 
3713   void work(uint worker_id) {
3714     G1PrepareRegionsClosure cl(_g1h, this);
3715     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
3716   }
3717 
3718   void add_humongous_candidates(size_t candidates) {
3719     Atomic::add(&_humongous_candidates, candidates);
3720   }
3721 
3722   void add_humongous_total(size_t total) {
3723     Atomic::add(&_humongous_total, total);
3724   }
3725 
3726   size_t humongous_candidates() {
3727     return _humongous_candidates;
3728   }
3729 
3730   size_t humongous_total() {
3731     return _humongous_total;
3732   }
3733 };
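
G1PrepareEvacuationTask follows a common HotSpot gang-task pattern: each worker's closure counts into plain locals while walking its claimed regions, then flushes once into the shared task, from the closure destructor, with an atomic add. A self-contained sketch of that accumulation pattern, using std::atomic in place of HotSpot's Atomic::add (all names illustrative):

    #include <atomic>
    #include <cstddef>

    struct SharedTotals {
      std::atomic<size_t> candidates{0};
      void add_candidates(size_t n) {
        // One atomic RMW per worker, not one per region.
        candidates.fetch_add(n, std::memory_order_relaxed);
      }
    };

    class WorkerCounter {       // one instance per worker; not copied
      SharedTotals* _shared;
      size_t _local;            // uncontended on the per-region hot path
    public:
      explicit WorkerCounter(SharedTotals* s) : _shared(s), _local(0) {}
      void count_candidate() { ++_local; }                    // per claimed region
      ~WorkerCounter() { _shared->add_candidates(_local); }   // flush exactly once
    };

Stack-allocating the closure inside work(), as the patch does, guarantees the destructor flush runs exactly once per worker before the task completes.
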
3734 
3735 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3736   _bytes_used_during_gc = 0;
3737 
3738   _expand_heap_after_alloc_failure = true;
3739   _evacuation_failed = false;
3740 
3741   // Disable the hot card cache.
3742   _hot_card_cache->reset_hot_cache_claimed_index();
3743   _hot_card_cache->set_use_cache(false);
3744 
3745   // Initialize the GC alloc regions.
3746   _allocator->init_gc_alloc_regions(evacuation_info);
3747 
3748   {
3749     Ticks start = Ticks::now();
3750     rem_set()->prepare_for_scan_heap_roots();
3751     phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3752   }
3753 
3754   {
3755     G1PrepareEvacuationTask g1_prep_task(this);
3756     Tickspan task_time = run_task(&g1_prep_task);
3757 
3758     phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3759                                            g1_prep_task.humongous_total(),
3760                                            g1_prep_task.humongous_candidates());
3761   }
3762 
3763   assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3764   _preserved_marks_set.assert_empty();
3765 
3766 #if COMPILER2_OR_JVMCI
3767   DerivedPointerTable::clear();
3768 #endif
3769 
3770   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3771   if (collector_state()->in_initial_mark_gc()) {
3772     concurrent_mark()->pre_initial_mark();
3773 
3774     double start_clear_claimed_marks = os::elapsedTime();
3775 
3776     ClassLoaderDataGraph::clear_claimed_marks();
3777 
3778     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3779     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3780   }
3781 
3782   // Should G1EvacuationFailureALot be in effect for this GC?
3783   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

