
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 52675 : 8213890: Implementation of JEP 344: Abortable Mixed Collections for G1
Reviewed-by:
Contributed-by: erik.helin@oracle.com, stefan.johansson@oracle.com
rev 52676 : imported patch AMGC-impl

old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp:

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/evacuationInfo.hpp"
  29 #include "gc/g1/g1BarrierSet.hpp"
  30 #include "gc/g1/g1BiasedArray.hpp"
  31 #include "gc/g1/g1CardTable.hpp"
  32 #include "gc/g1/g1CollectionSet.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"

  38 #include "gc/g1/g1HeapTransition.hpp"
  39 #include "gc/g1/g1HeapVerifier.hpp"
  40 #include "gc/g1/g1HRPrinter.hpp"
  41 #include "gc/g1/g1InCSetState.hpp"
  42 #include "gc/g1/g1MonitoringSupport.hpp"
  43 #include "gc/g1/g1SurvivorRegions.hpp"
  44 #include "gc/g1/g1YCTypes.hpp"
  45 #include "gc/g1/heapRegionManager.hpp"
  46 #include "gc/g1/heapRegionSet.hpp"
  47 #include "gc/shared/barrierSet.hpp"
  48 #include "gc/shared/collectedHeap.hpp"
  49 #include "gc/shared/gcHeapSummary.hpp"
  50 #include "gc/shared/plab.hpp"
  51 #include "gc/shared/preservedMarks.hpp"
  52 #include "gc/shared/softRefPolicy.hpp"
  53 #include "memory/memRegion.hpp"
  54 #include "utilities/stack.hpp"
  55 
  56 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  57 // It uses the "Garbage First" heap organization and algorithm, which


 551   inline void set_humongous_reclaim_candidate(uint region, bool value);
 552   inline bool is_humongous_reclaim_candidate(uint region);
 553 
 554   // Remove from the reclaim candidate set.  Also remove from the
 555   // collection set so that later encounters avoid the slow path.
 556   inline void set_humongous_is_live(oop obj);
 557 
 558   // Register the given region to be part of the collection set.
 559   inline void register_humongous_region_with_cset(uint index);
 560   // Register regions with humongous objects (actually on the start region) in
 561   // the in_cset_fast_test table.
 562   void register_humongous_regions_with_cset();
 563   // We register a region with the fast "in collection set" test. We
 564   // simply set to true the array slot corresponding to this region.
 565   void register_young_region_with_cset(HeapRegion* r) {
 566     _in_cset_fast_test.set_in_young(r->hrm_index());
 567   }
 568   void register_old_region_with_cset(HeapRegion* r) {
 569     _in_cset_fast_test.set_in_old(r->hrm_index());
 570   }



 571   void clear_in_cset(const HeapRegion* hr) {
 572     _in_cset_fast_test.clear(hr);
 573   }
 574 
 575   void clear_cset_fast_test() {
 576     _in_cset_fast_test.clear();
 577   }
 578 
 579   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 580 
 581   // This is called at the start of either a concurrent cycle or a Full
 582   // GC to update the number of old marking cycles started.
 583   void increment_old_marking_cycles_started();
 584 
 585   // This is called at the end of either a concurrent cycle or a Full
 586   // GC to update the number of old marking cycles completed. Those two
 587   // can happen in a nested fashion, i.e., we start a concurrent
 588   // cycle, a Full GC happens half-way through it which ends first,
 589   // and then the cycle notices that a Full GC happened and ends
 590   // too. The concurrent parameter is a boolean to help us do a bit


 707   // locker was active). Given that we should not be holding the
 708   // Heap_lock when we enter this method, we will pass the
 709   // gc_count_before (i.e., total_collections()) as a parameter since
 710   // it has to be read while holding the Heap_lock. Currently, both
 711   // methods that call do_collection_pause() release the Heap_lock
 712   // before the call, so it's easy to read gc_count_before just before.
 713   HeapWord* do_collection_pause(size_t         word_size,
 714                                 uint           gc_count_before,
 715                                 bool*          succeeded,
 716                                 GCCause::Cause gc_cause);
 717 
 718   void wait_for_root_region_scanning();
 719 
 720   // The guts of the incremental collection pause, executed by the vm
 721   // thread. It returns false if it is unable to do the collection due
 722   // to the GC locker being active, true otherwise
 723   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 724 
 725   // Actually do the work of evacuating the collection set.
 726   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);


 727 
 728   void pre_evacuate_collection_set();
 729   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 730 
 731   // Print the header for the per-thread termination statistics.
 732   static void print_termination_stats_hdr();
 733   // Print actual per-thread termination statistics.
 734   void print_termination_stats(uint worker_id,
 735                                double elapsed_ms,
 736                                double strong_roots_ms,
 737                                double term_ms,
 738                                size_t term_attempts,
 739                                size_t alloc_buffer_waste,
 740                                size_t undo_waste) const;
 741   // Update object copying statistics.
 742   void record_obj_copy_mem_stats();
 743 
 744   // The hot card cache for remembered set insertion optimization.
 745   G1HotCardCache* _hot_card_cache;
 746 


1389   // The following two methods are helpful for debugging RSet issues.
1390   void print_cset_rsets() PRODUCT_RETURN;
1391   void print_all_rsets() PRODUCT_RETURN;
1392 
1393   size_t pending_card_num();
1394 };
1395 
1396 class G1ParEvacuateFollowersClosure : public VoidClosure {
1397 private:
1398   double _start_term;
1399   double _term_time;
1400   size_t _term_attempts;
1401 
1402   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1403   void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1404 protected:
1405   G1CollectedHeap*              _g1h;
1406   G1ParScanThreadState*         _par_scan_state;
1407   RefToScanQueueSet*            _queues;
1408   ParallelTaskTerminator*       _terminator;

1409 
1410   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1411   RefToScanQueueSet*      queues()         { return _queues; }
1412   ParallelTaskTerminator* terminator()     { return _terminator; }
1413 
1414 public:
1415   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1416                                 G1ParScanThreadState* par_scan_state,
1417                                 RefToScanQueueSet* queues,
1418                                 ParallelTaskTerminator* terminator)

1419     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1420       _g1h(g1h), _par_scan_state(par_scan_state),
1421       _queues(queues), _terminator(terminator) {}
1422 
1423   void do_void();
1424 
1425   double term_time() const { return _term_time; }
1426   size_t term_attempts() const { return _term_attempts; }
1427 
1428 private:
1429   inline bool offer_termination();
1430 };
1431 
1432 #endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP

new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp:

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/evacuationInfo.hpp"
  29 #include "gc/g1/g1BarrierSet.hpp"
  30 #include "gc/g1/g1BiasedArray.hpp"
  31 #include "gc/g1/g1CardTable.hpp"
  32 #include "gc/g1/g1CollectionSet.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1GCPhaseTimes.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1InCSetState.hpp"
  43 #include "gc/g1/g1MonitoringSupport.hpp"
  44 #include "gc/g1/g1SurvivorRegions.hpp"
  45 #include "gc/g1/g1YCTypes.hpp"
  46 #include "gc/g1/heapRegionManager.hpp"
  47 #include "gc/g1/heapRegionSet.hpp"
  48 #include "gc/shared/barrierSet.hpp"
  49 #include "gc/shared/collectedHeap.hpp"
  50 #include "gc/shared/gcHeapSummary.hpp"
  51 #include "gc/shared/plab.hpp"
  52 #include "gc/shared/preservedMarks.hpp"
  53 #include "gc/shared/softRefPolicy.hpp"
  54 #include "memory/memRegion.hpp"
  55 #include "utilities/stack.hpp"
  56 
  57 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  58 // It uses the "Garbage First" heap organization and algorithm, which


 552   inline void set_humongous_reclaim_candidate(uint region, bool value);
 553   inline bool is_humongous_reclaim_candidate(uint region);
 554 
 555   // Remove from the reclaim candidate set.  Also remove from the
 556   // collection set so that later encounters avoid the slow path.
 557   inline void set_humongous_is_live(oop obj);
 558 
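The comments above describe G1's eager-reclaim bookkeeping for humongous objects. Below is a minimal standalone sketch of that flow, using hypothetical names and plain STL containers in place of G1's region-indexed tables: a humongous region starts out as a reclaim candidate, and the first reference discovered to its object proves it live, removing it from both the candidate set and the in-cset table so later encounters take the fast path.

#include <cstdint>
#include <unordered_set>

// Hypothetical stand-in for G1's per-region candidate and in-cset bookkeeping.
class HumongousReclaimTracker {
  std::unordered_set<uint32_t> _reclaim_candidates; // regions we may reclaim eagerly
  std::unordered_set<uint32_t> _in_cset;            // stands in for the fast-test table

public:
  // At the start of a collection, a humongous region with no known
  // references is both a reclaim candidate and (per the fast test) in the cset.
  void register_candidate(uint32_t region) {
    _reclaim_candidates.insert(region);
    _in_cset.insert(region);
  }

  // Called when a reference to the humongous object is found: the object is
  // live, so drop it from both sets; later encounters avoid the slow path.
  void set_is_live(uint32_t region) {
    _reclaim_candidates.erase(region);
    _in_cset.erase(region);
  }

  bool is_reclaim_candidate(uint32_t region) const {
    return _reclaim_candidates.count(region) != 0;
  }
};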
 559   // Register the given region to be part of the collection set.
 560   inline void register_humongous_region_with_cset(uint index);
 561   // Register regions with humongous objects (actually on the start region) in
 562   // the in_cset_fast_test table.
 563   void register_humongous_regions_with_cset();
 564   // We register a region with the fast "in collection set" test. We
 565   // simply set to true the array slot corresponding to this region.
 566   void register_young_region_with_cset(HeapRegion* r) {
 567     _in_cset_fast_test.set_in_young(r->hrm_index());
 568   }
 569   void register_old_region_with_cset(HeapRegion* r) {
 570     _in_cset_fast_test.set_in_old(r->hrm_index());
 571   }
 572   void register_optional_region_with_cset(HeapRegion* r) {
 573     _in_cset_fast_test.set_optional(r->hrm_index());
 574   }
 575   void clear_in_cset(const HeapRegion* hr) {
 576     _in_cset_fast_test.clear(hr);
 577   }
 578 
 579   void clear_cset_fast_test() {
 580     _in_cset_fast_test.clear();
 581   }
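The registration methods above are the point of the fast in-cset test: one state slot per region, indexed by hrm_index(), so the hot "is this in the collection set?" query is a single array load rather than a walk over the collection set. Below is a minimal sketch of that idea with hypothetical names (the real table comes from g1InCSetState.hpp and is a biased mapped array, not a plain vector), including the Optional state this patch introduces for regions that are evacuated only if pause time allows:

#include <cstddef>
#include <cstdint>
#include <vector>

// Per-region collection-set state; Optional is the state added by this patch
// for old regions that are evacuated only while pause-time budget remains.
enum class CSetState : int8_t { NotInCSet, Young, Old, Optional };

// Hypothetical flat-array version of the fast in-cset test.
class InCSetFastTest {
  std::vector<CSetState> _states; // one slot per heap region

public:
  explicit InCSetFastTest(std::size_t num_regions)
      : _states(num_regions, CSetState::NotInCSet) {}

  void set_in_young(uint32_t region) { _states[region] = CSetState::Young; }
  void set_in_old(uint32_t region)   { _states[region] = CSetState::Old; }
  void set_optional(uint32_t region) { _states[region] = CSetState::Optional; }
  void clear(uint32_t region)        { _states[region] = CSetState::NotInCSet; }

  // The fast test itself: O(1) per query.
  bool in_cset(uint32_t region) const {
    return _states[region] != CSetState::NotInCSet;
  }
};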
 582 
 583   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 584 
 585   // This is called at the start of either a concurrent cycle or a Full
 586   // GC to update the number of old marking cycles started.
 587   void increment_old_marking_cycles_started();
 588 
 589   // This is called at the end of either a concurrent cycle or a Full
 590   // GC to update the number of old marking cycles completed. Those two
 591   // can happen in a nested fashion, i.e., we start a concurrent
 592   // cycle, a Full GC happens half-way through it which ends first,
 593   // and then the cycle notices that a Full GC happened and ends
 594   // too. The concurrent parameter is a boolean to help us do a bit
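The hunk cuts the comment off mid-sentence, but the started/completed protocol it describes is clear enough to sketch. A minimal illustration with plain counters (no locking shown; how the real methods synchronize is not visible in this hunk):

struct OldMarkingCycleCounts {
  unsigned _started   = 0;
  unsigned _completed = 0;

  // Called at the start of either a concurrent cycle or a Full GC.
  void increment_started() { _started++; }

  // Called at the end of either. Because a Full GC can begin and end inside a
  // still-running concurrent cycle, completions can arrive nested; at any
  // instant, _started - _completed is the number of cycles still in flight.
  void increment_completed() { _completed++; }
};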


 711   // locker was active). Given that we should not be holding the
 712   // Heap_lock when we enter this method, we will pass the
 713   // gc_count_before (i.e., total_collections()) as a parameter since
 714   // it has to be read while holding the Heap_lock. Currently, both
 715   // methods that call do_collection_pause() release the Heap_lock
 716   // before the call, so it's easy to read gc_count_before just before.
 717   HeapWord* do_collection_pause(size_t         word_size,
 718                                 uint           gc_count_before,
 719                                 bool*          succeeded,
 720                                 GCCause::Cause gc_cause);
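The comment above prescribes a specific caller protocol: sample total_collections() while holding Heap_lock, release the lock, then pass the sample to do_collection_pause(). Below is a standalone sketch of that protocol, with std::mutex standing in for Heap_lock and a plain counter for total_collections(); the stale-count check inside the pause is an assumption (the usual purpose of such a count, not shown in this hunk):

#include <mutex>

std::mutex heap_lock;            // stands in for Heap_lock
unsigned   collections_done = 0; // stands in for total_collections()

// Sample the count; per the comment, it must be read under the lock.
unsigned read_gc_count_before() {
  std::lock_guard<std::mutex> guard(heap_lock);
  return collections_done;
}

// The pause re-takes the lock and compares counts: if another thread's GC
// completed between the sample and now, this request is stale and skipped.
bool do_collection_pause(unsigned gc_count_before) {
  std::lock_guard<std::mutex> guard(heap_lock);
  if (collections_done != gc_count_before) {
    return false;                // a GC already happened; don't repeat it
  }
  collections_done++;            // perform the collection (elided)
  return true;
}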
 721 
 722   void wait_for_root_region_scanning();
 723 
 724   // The guts of the incremental collection pause, executed by the vm
 725   // thread. It returns false if it is unable to do the collection due
 726   // to the GC locker being active, true otherwise
 727   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 728 
 729   // Actually do the work of evacuating the collection set.
 730   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
 731   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 732   void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
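These two declarations carry the core of JEP 344: after the mandatory collection set is evacuated, optional regions are evacuated only while the pause's time budget holds out. A minimal standalone sketch of that abortable loop follows, with hypothetical names and time source; the real code works through G1OptionalCSet and the pause-time policy:

#include <chrono>
#include <vector>

struct Region { int id; };

// Stand-in for the pause's clock; measured from program start here.
static const auto pause_start = std::chrono::steady_clock::now();

static double elapsed_ms() {
  using namespace std::chrono;
  return duration<double, std::milli>(steady_clock::now() - pause_start).count();
}

static void evacuate_region(Region&) { /* copy live objects out (elided) */ }

// Mandatory regions are always evacuated; optional ones only while the
// target pause time has not been exhausted, so the tail can be aborted.
void evacuate_with_optional(std::vector<Region>& mandatory,
                            std::vector<Region>& optional,
                            double target_pause_ms) {
  for (Region& r : mandatory) {
    evacuate_region(r);
  }
  for (Region& r : optional) {
    if (elapsed_ms() >= target_pause_ms) {
      break;  // abort: remaining optional regions stay for a future pause
    }
    evacuate_region(r);
  }
}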
 733 
 734   void pre_evacuate_collection_set();
 735   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 736 
 737   // Print the header for the per-thread termination statistics.
 738   static void print_termination_stats_hdr();
 739   // Print actual per-thread termination statistics.
 740   void print_termination_stats(uint worker_id,
 741                                double elapsed_ms,
 742                                double strong_roots_ms,
 743                                double term_ms,
 744                                size_t term_attempts,
 745                                size_t alloc_buffer_waste,
 746                                size_t undo_waste) const;
 747   // Update object copying statistics.
 748   void record_obj_copy_mem_stats();
 749 
 750   // The hot card cache for remembered set insertion optimization.
 751   G1HotCardCache* _hot_card_cache;
 752 


1395   // The following two methods are helpful for debugging RSet issues.
1396   void print_cset_rsets() PRODUCT_RETURN;
1397   void print_all_rsets() PRODUCT_RETURN;
1398 
1399   size_t pending_card_num();
1400 };
1401 
1402 class G1ParEvacuateFollowersClosure : public VoidClosure {
1403 private:
1404   double _start_term;
1405   double _term_time;
1406   size_t _term_attempts;
1407 
1408   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1409   void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1410 protected:
1411   G1CollectedHeap*              _g1h;
1412   G1ParScanThreadState*         _par_scan_state;
1413   RefToScanQueueSet*            _queues;
1414   ParallelTaskTerminator*       _terminator;
1415   G1GCPhaseTimes::GCParPhases   _phase;
1416 
1417   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1418   RefToScanQueueSet*      queues()         { return _queues; }
1419   ParallelTaskTerminator* terminator()     { return _terminator; }
1420 
1421 public:
1422   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1423                                 G1ParScanThreadState* par_scan_state,
1424                                 RefToScanQueueSet* queues,
1425                                 ParallelTaskTerminator* terminator,
1426                                 G1GCPhaseTimes::GCParPhases phase)
1427     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1428       _g1h(g1h), _par_scan_state(par_scan_state),
1429       _queues(queues), _terminator(terminator), _phase(phase) {}
1430 
1431   void do_void();
1432 
1433   double term_time() const { return _term_time; }
1434   size_t term_attempts() const { return _term_attempts; }
1435 
1436 private:
1437   inline bool offer_termination();
1438 };
1439 
1440 #endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
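G1ParEvacuateFollowersClosure's start_term_time()/end_term_time() pair is the termination-statistics pattern that print_termination_stats() later reports: count every termination offer and accumulate the wall time spent in the protocol. (The new _phase field presumably lets the same closure attribute its work to the correct G1GCPhaseTimes::GCParPhases entry now that there are both mandatory and optional evacuation rounds.) A standalone sketch of the bookkeeping, with std::chrono standing in for os::elapsedTime():

#include <chrono>
#include <cstdio>

// Stand-in for os::elapsedTime(): seconds on a monotonic clock.
static double elapsed_time() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

class TerminationStats {
  double _start_term    = 0.0;  // timestamp of the current attempt
  double _term_time     = 0.0;  // total seconds spent offering termination
  size_t _term_attempts = 0;    // number of offers made

public:
  void start_term_time() { _term_attempts++; _start_term = elapsed_time(); }
  void end_term_time()   { _term_time += elapsed_time() - _start_term; }

  double term_time() const     { return _term_time; }
  size_t term_attempts() const { return _term_attempts; }

  // Per-thread line in the spirit of print_termination_stats().
  void print(unsigned worker_id) const {
    std::printf("worker %u: term %.2fms over %zu attempts\n",
                worker_id, _term_time * 1000.0, _term_attempts);
  }
};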