src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/gSpaceCounters.hpp"
  29 #include "gc_implementation/shared/gcStats.hpp"
  30 #include "gc_implementation/shared/generationCounters.hpp"
  31 #include "memory/freeBlockDictionary.hpp"
  32 #include "memory/generation.hpp"
  33 #include "runtime/mutexLocker.hpp"
  34 #include "runtime/virtualspace.hpp"
  35 #include "services/memoryService.hpp"
  36 #include "utilities/bitMap.inline.hpp"
  37 #include "utilities/stack.inline.hpp"
  38 #include "utilities/taskqueue.hpp"
  39 #include "utilities/yieldingWorkgroup.hpp"
  40 
  41 // ConcurrentMarkSweepGeneration is in support of a concurrent
  42 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
  43 // style. We assume, for now, that this generation is always the
  44 // seniormost generation and, for simplicity in the first
  45 // implementation, that it is a single compactible space. Neither of
  46 // these restrictions appears essential; both will be relaxed in the
  47 // future when more time is available to implement the greater
  48 // generality (and there's a need for it).
  49 //
  50 // Concurrent mode failures are currently handled by
  51 // means of a sliding mark-compact.
  52 
  53 class CMSAdaptiveSizePolicy;
  54 class CMSConcMarkingTask;
  55 class CMSGCAdaptivePolicyCounters;
  56 class ConcurrentMarkSweepGeneration;
  57 class ConcurrentMarkSweepPolicy;
  58 class ConcurrentMarkSweepThread;
  59 class CompactibleFreeListSpace;
  60 class FreeChunk;
  61 class PromotionInfo;
  62 class ScanMarkedObjectsAgainCarefullyClosure;
  63 class TenuredGeneration;
  64 
  65 // A generic CMS bit map. It's the basis for both the CMS marking bit map
  66 // as well as for the mod union table (in each case only a subset of the
  67 // methods are used). This is essentially a wrapper around the BitMap class,
  68 // with one bit per (1<<_shifter) HeapWords. (I.e., for the marking bit map
  69 // we have _shifter == 0, and for the mod union table we have
  70 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
  71 // XXX 64-bit issues in BitMap?
  72 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  73   friend class VMStructs;
  74 
  75   HeapWord* _bmStartWord;   // base address of range covered by map
  76   size_t    _bmWordSize;    // map size (in #HeapWords covered)
  77   const int _shifter;       // shifts to convert HeapWord to bit position
  78   VirtualSpace _virtual_space; // underlying the bit map
  79   BitMap    _bm;            // the bit map itself
  80  public:
  81   Mutex* const _lock;       // mutex protecting _bm;
  82 
  83  public:
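
The HeapWord-to-bit mapping described in the class comment is plain shift arithmetic. A minimal sketch, not HotSpot code; the 64-bit word size and all names below are assumptions:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const int LogHeapWordSize = 3;  // assumes a 64-bit VM: 8-byte HeapWords

    // Bit index for 'addr' in a map starting at 'bm_start' that dedicates one
    // bit to every (1 << shifter) HeapWords; shifter == 0 gives the marking
    // bit map, a card-sized shifter gives the mod union table.
    size_t addr_to_bit(uintptr_t addr, uintptr_t bm_start, int shifter) {
      assert(addr >= bm_start);
      size_t word_offset = (addr - bm_start) >> LogHeapWordSize;
      return word_offset >> shifter;
    }

    // Inverse: the first HeapWord covered by a given bit.
    uintptr_t bit_to_addr(size_t bit, uintptr_t bm_start, int shifter) {
      return bm_start + ((bit << shifter) << LogHeapWordSize);
    }
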

 551   oop _overflow_list;
 552   // The following array-pair keeps track of mark words
 553   // displaced for accommodating the overflow list above.
 554   // This code will likely be revisited under RFE#4922830.
 555   Stack<oop, mtGC>     _preserved_oop_stack;
 556   Stack<markOop, mtGC> _preserved_mark_stack;
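
The pairing of the two stacks can be pictured with a small stand-alone sketch: one stack holds the objects, the other their original header words, pushed and popped in lock step. The names and the link-through-the-header detail below are assumptions, not the collector's actual code:

    #include <cstdint>
    #include <vector>

    struct Obj { uintptr_t mark; };  // stand-in for an oop and its header word

    std::vector<Obj*>      preserved_objs;   // plays the _preserved_oop_stack role
    std::vector<uintptr_t> preserved_marks;  // plays the _preserved_mark_stack role

    // Save the original header before it is overwritten, e.g. to thread the
    // object onto the overflow list.
    void preserve_mark(Obj* obj, uintptr_t overwritten_with) {
      preserved_objs.push_back(obj);
      preserved_marks.push_back(obj->mark);
      obj->mark = overwritten_with;
    }

    // After overflow processing, walk both stacks together and put every
    // displaced header back.
    void restore_preserved_marks() {
      while (!preserved_objs.empty()) {
        preserved_objs.back()->mark = preserved_marks.back();
        preserved_objs.pop_back();
        preserved_marks.pop_back();
      }
    }
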
 557 
 558   int*             _hash_seed;
 559 
 560   // In support of multi-threaded concurrent phases
 561   YieldingFlexibleWorkGang* _conc_workers;
 562 
 563   // Performance Counters
 564   CollectorCounters* _gc_counters;
 565 
 566   // Initialization Errors
 567   bool _completed_initialization;
 568 
 569   // In support of ExplicitGCInvokesConcurrent
 570   static   bool _full_gc_requested;
 571   unsigned int  _collection_count_start;
 572 
 573   // Should we unload classes this concurrent cycle?
 574   bool _should_unload_classes;
 575   unsigned int  _concurrent_cycles_since_last_unload;
 576   unsigned int concurrent_cycles_since_last_unload() const {
 577     return _concurrent_cycles_since_last_unload;
 578   }
 579   // Did we (allow) unload classes in the previous concurrent cycle?
 580   bool unloaded_classes_last_cycle() const {
 581     return concurrent_cycles_since_last_unload() == 0;
 582   }
 583   // Root scanning options for perm gen
 584   int _roots_scanning_options;
 585   int roots_scanning_options() const      { return _roots_scanning_options; }
 586   void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
 587   void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
 588 
 589   // Verification support
 590   CMSBitMap     _verification_mark_bm;

 592   void verify_after_remark_work_2();
 593 
 594   // true if any verification flag is on.
 595   bool _verifying;
 596   bool verifying() const { return _verifying; }
 597   void set_verifying(bool v) { _verifying = v; }
 598 
 599   // Collector policy
 600   ConcurrentMarkSweepPolicy* _collector_policy;
 601   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 602 
 603   void set_did_compact(bool v);
 604 
 605   // XXX Move these to CMSStats ??? FIX ME !!!
 606   elapsedTimer _inter_sweep_timer;   // time between sweeps
 607   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
 608   // padded decaying average estimates of the above
 609   AdaptivePaddedAverage _inter_sweep_estimate;
 610   AdaptivePaddedAverage _intra_sweep_estimate;
 611 

 612  protected:
 613   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
 614   MemRegion                      _span;    // span covering above two
 615   CardTableRS*                   _ct;      // card table
 616 
 617   // CMS marking support structures
 618   CMSBitMap     _markBitMap;
 619   CMSBitMap     _modUnionTable;
 620   CMSMarkStack  _markStack;
 621 
 622   HeapWord*     _restart_addr; // in support of marking stack overflow
 623   void          lower_restart_addr(HeapWord* low);
 624 
 625   // Counters in support of marking stack / work queue overflow handling:
 626   // a non-zero value indicates certain types of overflow events during
 627   // the current CMS cycle and could lead to stack resizing efforts at
 628   // an opportune future time.
 629   size_t        _ser_pmc_preclean_ovflw;
 630   size_t        _ser_pmc_remark_ovflw;
 631   size_t        _par_pmc_remark_ovflw;

 810   // An auxiliary method used to record the ends of
 811   // used regions of each generation to limit the extent of the sweep
 812   void save_sweep_limits();
 813 
 814   // A work method used by the foreground collector to determine
 815   // what type of collection (compacting or not, continuing or fresh)
 816   // it should do.
 817   void decide_foreground_collection_type(bool clear_all_soft_refs,
 818     bool* should_compact, bool* should_start_over);
 819 
 820   // A work method used by the foreground collector to do
 821   // a mark-sweep-compact.
 822   void do_compaction_work(bool clear_all_soft_refs);
 823 
 824   // A work method used by the foreground collector to do
 825   // a mark-sweep, after taking over from a possibly on-going
 826   // concurrent mark-sweep collection.
 827   void do_mark_sweep_work(bool clear_all_soft_refs,
 828     CollectorState first_state, bool should_start_over);
 829 
 830   // If the background GC is active, acquire control from the background
 831   // GC and do the collection.
 832   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
 833 
 834   // For synchronizing passing of control from background to foreground
 835   // GC.  waitForForegroundGC() is called by the background
 836   // collector.  If it had to wait for a foreground collection,
 837   // it returns true and the background collection should assume
 838   // that the collection was finished by the foreground
 839   // collector.
 840   bool waitForForegroundGC();
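
The handshake the comment describes can be modeled with ordinary thread primitives. A minimal sketch under assumed names; the VM uses its own monitors and a richer collector state machine:

    #include <condition_variable>
    #include <mutex>

    std::mutex              gc_lock;
    std::condition_variable gc_cv;
    bool foreground_wants_control = false;  // set by the foreground collector
    bool foreground_gc_finished   = false;  // set when the foreground GC is done

    // Returns false if the background cycle may keep running; returns true if
    // a foreground collection took over and finished the work.
    bool wait_for_foreground_gc() {
      std::unique_lock<std::mutex> l(gc_lock);
      if (!foreground_wants_control) return false;
      gc_cv.wait(l, [] { return foreground_gc_finished; });
      return true;
    }
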
 841 
 842   // Incremental mode triggering:  recompute the icms duty cycle and set the
 843   // allocation limits in the young gen.
 844   void icms_update_allocation_limits();
 845 
 846   size_t block_size_using_printezis_bits(HeapWord* addr) const;
 847   size_t block_size_if_printezis_bits(HeapWord* addr) const;
 848   HeapWord* next_card_start_after_block(HeapWord* addr) const;
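
block_size_using_printezis_bits() relies on Printezis's encoding: when a block's size cannot be read from its header, two extra mark bits record its extent, one just past the block's start and one at its last word. The sketch below shows my understanding of the scheme with a plain bitset and hypothetical names:

    #include <bitset>
    #include <cassert>
    #include <cstddef>

    std::bitset<1024> mark_bits;  // one bit per HeapWord (shifter == 0)

    // Record the extent of a block whose header cannot be consulted: set the
    // bit just past the block's start and the bit for its last word.
    void record_block_size(size_t addr, size_t size_in_words) {
      assert(size_in_words >= 3);
      mark_bits.set(addr + 1);
      mark_bits.set(addr + size_in_words - 1);
    }

    // Recover the size: the next marked bit at or after addr + 2 marks the
    // block's last word.
    size_t block_size_from_printezis_bits(size_t addr) {
      assert(mark_bits.test(addr + 1));
      size_t last = addr + 2;
      while (!mark_bits.test(last)) ++last;
      return last + 1 - addr;
    }
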
 849 

 859 
 860   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
 861   static CollectorState abstract_state() { return _collectorState;  }
 862 
 863   bool should_abort_preclean() const; // Whether preclean should be aborted.
 864   size_t get_eden_used() const;
 865   size_t get_eden_capacity() const;
 866 
 867   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 868 
 869   // locking checks
 870   NOT_PRODUCT(static bool have_cms_token();)
 871 
 872   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
 873   bool shouldConcurrentCollect();
 874 
 875   void collect(bool   full,
 876                bool   clear_all_soft_refs,
 877                size_t size,
 878                bool   tlab);
 879   void collect_in_background(bool clear_all_soft_refs);
 880   void collect_in_foreground(bool clear_all_soft_refs);
 881 
 882   // In support of ExplicitGCInvokesConcurrent
 883   static void request_full_gc(unsigned int full_gc_count);
 884   // Should we unload classes in a particular concurrent cycle?
 885   bool should_unload_classes() const {
 886     return _should_unload_classes;
 887   }
 888   void update_should_unload_classes();
 889 
 890   void direct_allocated(HeapWord* start, size_t size);
 891 
 892   // Object is dead if not marked and current phase is sweeping.
 893   bool is_dead_obj(oop obj) const;
 894 
 895   // After a promotion (of "start"), do any necessary marking.
 896   // If "par", then it's being done by a parallel GC thread.
 897   // The last two args indicate if we need precise marking
 898   // and if so the size of the object so it can be dirtied
 899   // in its entirety.
 900   void promoted(bool par, HeapWord* start,
 901                 bool is_obj_array, size_t obj_size);
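
The "dirtied in its entirety" remark can be pictured with a card-style table: precise marking dirties every card the object spans, not just its first word. A sketch only, with an assumed card size and hypothetical names:

    #include <cstddef>
    #include <vector>

    const size_t kCardWords = 64;              // words per card: an assumption
    std::vector<bool> mod_union(1024, false);  // one dirty bit per card

    // Mark every card that [start_word, start_word + size_in_words) spans.
    void dirty_range(size_t start_word, size_t size_in_words) {
      size_t first = start_word / kCardWords;
      size_t last  = (start_word + size_in_words - 1) / kCardWords;
      for (size_t c = first; c <= last; ++c) mod_union[c] = true;
    }

    // Precise marking (e.g. for an object array) dirties the object in its
    // entirety so rescanning sees every reference field; otherwise dirtying
    // the start suffices.
    void after_promotion(bool precise, size_t start_word, size_t obj_size) {
      dirty_range(start_word, precise ? obj_size : 1);
    }
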
 902 
 903   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gSpaceCounters.hpp"
  30 #include "gc_implementation/shared/gcStats.hpp"
  31 #include "gc_implementation/shared/gcWhen.hpp"
  32 #include "gc_implementation/shared/generationCounters.hpp"
  33 #include "memory/freeBlockDictionary.hpp"
  34 #include "memory/generation.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/virtualspace.hpp"
  37 #include "services/memoryService.hpp"
  38 #include "utilities/bitMap.inline.hpp"
  39 #include "utilities/stack.inline.hpp"
  40 #include "utilities/taskqueue.hpp"
  41 #include "utilities/yieldingWorkgroup.hpp"
  42 
  43 // ConcurrentMarkSweepGeneration is in support of a concurrent
  44 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
  45 // style. We assume, for now, that this generation is always the
  46 // seniormost generation and, for simplicity in the first
  47 // implementation, that it is a single compactible space. Neither of
  48 // these restrictions appears essential; both will be relaxed in the
  49 // future when more time is available to implement the greater
  50 // generality (and there's a need for it).
  51 //
  52 // Concurrent mode failures are currently handled by
  53 // means of a sliding mark-compact.
  54 
  55 class CMSAdaptiveSizePolicy;
  56 class CMSConcMarkingTask;
  57 class CMSGCAdaptivePolicyCounters;
  58 class CMSTracer;
  59 class ConcurrentGCTimer;
  60 class ConcurrentMarkSweepGeneration;
  61 class ConcurrentMarkSweepPolicy;
  62 class ConcurrentMarkSweepThread;
  63 class CompactibleFreeListSpace;
  64 class FreeChunk;
  65 class PromotionInfo;
  66 class ScanMarkedObjectsAgainCarefullyClosure;
  67 class TenuredGeneration;
  68 class SerialOldTracer;
  69 
  70 // A generic CMS bit map. It's the basis for both the CMS marking bit map
  71 // as well as for the mod union table (in each case only a subset of the
  72 // methods are used). This is essentially a wrapper around the BitMap class,
  73 // with one bit per (1<<_shifter) HeapWords. (I.e., for the marking bit map
  74 // we have _shifter == 0, and for the mod union table we have
  75 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
  76 // XXX 64-bit issues in BitMap?
  77 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  78   friend class VMStructs;
  79 
  80   HeapWord* _bmStartWord;   // base address of range covered by map
  81   size_t    _bmWordSize;    // map size (in #HeapWords covered)
  82   const int _shifter;       // shifts to convert HeapWord to bit position
  83   VirtualSpace _virtual_space; // underlying the bit map
  84   BitMap    _bm;            // the bit map itself
  85  public:
  86   Mutex* const _lock;       // mutex protecting _bm;
  87 
  88  public:

 556   oop _overflow_list;
 557   // The following array-pair keeps track of mark words
 558   // displaced for accommodating the overflow list above.
 559   // This code will likely be revisited under RFE#4922830.
 560   Stack<oop, mtGC>     _preserved_oop_stack;
 561   Stack<markOop, mtGC> _preserved_mark_stack;
 562 
 563   int*             _hash_seed;
 564 
 565   // In support of multi-threaded concurrent phases
 566   YieldingFlexibleWorkGang* _conc_workers;
 567 
 568   // Performance Counters
 569   CollectorCounters* _gc_counters;
 570 
 571   // Initialization Errors
 572   bool _completed_initialization;
 573 
 574   // In support of ExplicitGCInvokesConcurrent
 575   static bool _full_gc_requested;
 576   static GCCause::Cause _full_gc_cause;
 577   unsigned int _collection_count_start;
 578 
 579   // Should we unload classes this concurrent cycle?
 580   bool _should_unload_classes;
 581   unsigned int  _concurrent_cycles_since_last_unload;
 582   unsigned int concurrent_cycles_since_last_unload() const {
 583     return _concurrent_cycles_since_last_unload;
 584   }
 585   // Did we (allow) unload classes in the previous concurrent cycle?
 586   bool unloaded_classes_last_cycle() const {
 587     return concurrent_cycles_since_last_unload() == 0;
 588   }
 589   // Root scanning options for perm gen
 590   int _roots_scanning_options;
 591   int roots_scanning_options() const      { return _roots_scanning_options; }
 592   void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
 593   void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
 594 
 595   // Verification support
 596   CMSBitMap     _verification_mark_bm;

 598   void verify_after_remark_work_2();
 599 
 600   // true if any verification flag is on.
 601   bool _verifying;
 602   bool verifying() const { return _verifying; }
 603   void set_verifying(bool v) { _verifying = v; }
 604 
 605   // Collector policy
 606   ConcurrentMarkSweepPolicy* _collector_policy;
 607   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 608 
 609   void set_did_compact(bool v);
 610 
 611   // XXX Move these to CMSStats ??? FIX ME !!!
 612   elapsedTimer _inter_sweep_timer;   // time between sweeps
 613   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
 614   // padded decaying average estimates of the above
 615   AdaptivePaddedAverage _inter_sweep_estimate;
 616   AdaptivePaddedAverage _intra_sweep_estimate;
 617 
 618   CMSTracer* _gc_tracer_cm;
 619   ConcurrentGCTimer* _gc_timer_cm;
 620 
 621   bool _cms_start_registered;
 622 
 623   GCHeapSummary _last_heap_summary;
 624   MetaspaceSummary _last_metaspace_summary;
 625 
 626   void register_foreground_gc_start(GCCause::Cause cause);
 627   void register_gc_start(GCCause::Cause cause);
 628   void register_gc_end();
 629   void save_heap_summary();
 630   void report_heap_summary(GCWhen::Type when);
 631 
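
These new members are the JEP 167 plumbing: snapshot the heap around the cycle and emit the tracing events when it ends. The sequencing below is a plausible sketch with hypothetical types, not the collector's actual control flow:

    enum class When { BeforeGC, AfterGC };
    struct HeapSummary { /* per-space used/capacity, elided */ };

    HeapSummary snapshot_heap() { return HeapSummary(); }  // stand-in

    struct CycleTracer {
      HeapSummary _before, _after;
      void register_gc_start() { /* record cause and start time */ }
      void save_heap_summary(When w, const HeapSummary& s) {
        (w == When::BeforeGC ? _before : _after) = s;
      }
      void register_gc_end() { /* emit the start/summary/end events */ }
    };

    void traced_concurrent_cycle(CycleTracer& t) {
      t.register_gc_start();
      t.save_heap_summary(When::BeforeGC, snapshot_heap());
      // ... initial mark, concurrent mark, remark, sweep ...
      t.save_heap_summary(When::AfterGC, snapshot_heap());
      t.register_gc_end();  // summaries are reported once the cycle ends
    }
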
 632  protected:
 633   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
 634   MemRegion                      _span;    // span covering above two
 635   CardTableRS*                   _ct;      // card table
 636 
 637   // CMS marking support structures
 638   CMSBitMap     _markBitMap;
 639   CMSBitMap     _modUnionTable;
 640   CMSMarkStack  _markStack;
 641 
 642   HeapWord*     _restart_addr; // in support of marking stack overflow
 643   void          lower_restart_addr(HeapWord* low);
 644 
 645   // Counters in support of marking stack / work queue overflow handling:
 646   // a non-zero value indicates certain types of overflow events during
 647   // the current CMS cycle and could lead to stack resizing efforts at
 648   // an opportune future time.
 649   size_t        _ser_pmc_preclean_ovflw;
 650   size_t        _ser_pmc_remark_ovflw;
 651   size_t        _par_pmc_remark_ovflw;

 830   // An auxiliary method used to record the ends of
 831   // used regions of each generation to limit the extent of the sweep
 832   void save_sweep_limits();
 833 
 834   // A work method used by the foreground collector to determine
 835   // what type of collection (compacting or not, continuing or fresh)
 836   // it should do.
 837   void decide_foreground_collection_type(bool clear_all_soft_refs,
 838     bool* should_compact, bool* should_start_over);
 839 
 840   // A work method used by the foreground collector to do
 841   // a mark-sweep-compact.
 842   void do_compaction_work(bool clear_all_soft_refs);
 843 
 844   // A work method used by the foreground collector to do
 845   // a mark-sweep, after taking over from a possibly on-going
 846   // concurrent mark-sweep collection.
 847   void do_mark_sweep_work(bool clear_all_soft_refs,
 848     CollectorState first_state, bool should_start_over);
 849 
 850   // Work methods for reporting concurrent mode interruption or failure
 851   bool is_external_interruption();
 852   void report_concurrent_mode_interruption();
 853 
 854   // If the background GC is active, acquire control from the background
 855   // GC and do the collection.
 856   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
 857 
 858   // For synchronizing passing of control from background to foreground
 859   // GC.  waitForForegroundGC() is called by the background
 860   // collector.  If it had to wait for a foreground collection,
 861   // it returns true and the background collection should assume
 862   // that the collection was finished by the foreground
 863   // collector.
 864   bool waitForForegroundGC();
 865 
 866   // Incremental mode triggering:  recompute the icms duty cycle and set the
 867   // allocation limits in the young gen.
 868   void icms_update_allocation_limits();
 869 
 870   size_t block_size_using_printezis_bits(HeapWord* addr) const;
 871   size_t block_size_if_printezis_bits(HeapWord* addr) const;
 872   HeapWord* next_card_start_after_block(HeapWord* addr) const;
 873 

 883 
 884   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
 885   static CollectorState abstract_state() { return _collectorState;  }
 886 
 887   bool should_abort_preclean() const; // Whether preclean should be aborted.
 888   size_t get_eden_used() const;
 889   size_t get_eden_capacity() const;
 890 
 891   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 892 
 893   // locking checks
 894   NOT_PRODUCT(static bool have_cms_token();)
 895 
 896   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
 897   bool shouldConcurrentCollect();
 898 
 899   void collect(bool   full,
 900                bool   clear_all_soft_refs,
 901                size_t size,
 902                bool   tlab);
 903   void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
 904   void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
 905 
 906   // In support of ExplicitGCInvokesConcurrent
 907   static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
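
Under ExplicitGCInvokesConcurrent, the new GCCause parameter lets the background cycle report why it ran. A sketch of the request side, with hypothetical names and simplified synchronization:

    #include <atomic>

    enum class Cause { java_lang_system_gc, jvmti_force_gc, other };

    std::atomic<unsigned> full_gc_count{0};   // bumped as collections complete
    std::atomic<bool>     full_gc_requested{false};
    Cause                 full_gc_cause = Cause::other;

    // 'count_at_request' is the collection count the requester observed; the
    // request is posted only if no full collection has completed since then,
    // and the cause is stored so the cycle can be reported with it.
    void request_full_gc(unsigned count_at_request, Cause cause) {
      if (full_gc_count.load() == count_at_request) {
        full_gc_cause = cause;
        full_gc_requested.store(true);
      }
    }
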
 908   // Should we unload classes in a particular concurrent cycle?
 909   bool should_unload_classes() const {
 910     return _should_unload_classes;
 911   }
 912   void update_should_unload_classes();
 913 
 914   void direct_allocated(HeapWord* start, size_t size);
 915 
 916   // Object is dead if not marked and current phase is sweeping.
 917   bool is_dead_obj(oop obj) const;
 918 
 919   // After a promotion (of "start"), do any necessary marking.
 920   // If "par", then it's being done by a parallel GC thread.
 921   // The last two args indicate if we need precise marking
 922   // and if so the size of the object so it can be dirtied
 923   // in its entirety.
 924   void promoted(bool par, HeapWord* start,
 925                 bool is_obj_array, size_t obj_size);
 926 
 927   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,