src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

rev 4518 : 8013934: Garbage collection event for CMS has wrong cause for System.gc()
Reviewed-by: brutisso, jwilhelm
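
The change in a nutshell: with -XX:+ExplicitGCInvokesConcurrent, a System.gc() call only recorded *that* a full collection was requested (_full_gc_requested), so the concurrent cycle it triggered reported the wrong cause in the garbage collection event. The fix records the requesting cause in a new static field (_full_gc_cause) and threads a GCCause::Cause parameter through request_full_gc, collect_in_background, and collect_in_foreground, as the hunks below show. Here is a minimal stand-alone model of the intended flow; the types are simplified stand-ins and service_pending_request is a helper invented for the sketch, not the actual HotSpot implementation:

#include <cstdio>

// Simplified stand-in for HotSpot's GCCause; only what the model needs.
namespace GCCause {
  enum Cause { _no_gc, _java_lang_system_gc };
  inline const char* to_string(Cause c) {
    return c == _java_lang_system_gc ? "System.gc()" : "No GC";
  }
}

struct CMSCollectorModel {
  static bool           _full_gc_requested;
  static GCCause::Cause _full_gc_cause;     // new in this change

  // Mirrors request_full_gc(unsigned int, GCCause::Cause): remember *why*
  // the collection was requested, not just *that* it was requested.
  static void request_full_gc(unsigned int /*full_gc_count*/, GCCause::Cause cause) {
    _full_gc_requested = true;
    _full_gc_cause     = cause;
  }

  // Mirrors collect_in_background(bool, GCCause::Cause): the cycle carries
  // the cause it was started for, so the GC event can report it correctly.
  static void collect_in_background(bool /*clear_all_soft_refs*/, GCCause::Cause cause) {
    std::printf("concurrent cycle starting, cause: %s\n", GCCause::to_string(cause));
  }

  // What the CMS background thread would do on seeing the request:
  // start the cycle with the recorded cause instead of a hard-coded one.
  static void service_pending_request() {
    if (_full_gc_requested) {
      _full_gc_requested = false;
      collect_in_background(false, _full_gc_cause);
    }
  }
};

bool           CMSCollectorModel::_full_gc_requested = false;
GCCause::Cause CMSCollectorModel::_full_gc_cause     = GCCause::_no_gc;

int main() {
  // ExplicitGCInvokesConcurrent path: System.gc() requests a concurrent cycle.
  CMSCollectorModel::request_full_gc(1, GCCause::_java_lang_system_gc);
  CMSCollectorModel::service_pending_request();  // prints: ... cause: System.gc()
  return 0;
}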


Old version:

  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static   bool _full_gc_requested;
  unsigned int  _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
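
The _preserved_oop_stack / _preserved_mark_stack pair above supports a classic trick: the overflow list links objects through their mark (header) words, so any header that carries real data must be saved first and restored once the list is drained. A stand-alone sketch of that idea; Obj, PROTOTYPE_MARK, and the two functions are simplified stand-ins invented for illustration, not HotSpot's oop/markOop machinery:

#include <cassert>
#include <cstdint>
#include <vector>

struct Obj {
  uintptr_t mark;   // header word: either real mark data or an overflow-list link
};

const uintptr_t PROTOTYPE_MARK = 0x1;       // "nothing interesting" header value

std::vector<Obj*>      preserved_oops;      // plays the role of _preserved_oop_stack
std::vector<uintptr_t> preserved_marks;     // plays the role of _preserved_mark_stack
Obj* overflow_list = nullptr;

void push_on_overflow_list(Obj* obj) {
  if (obj->mark != PROTOTYPE_MARK) {
    // The header carries real data (hash, lock bits, age): preserve it
    // in the parallel stacks before the link overwrites it.
    preserved_oops.push_back(obj);
    preserved_marks.push_back(obj->mark);
  }
  obj->mark = reinterpret_cast<uintptr_t>(overflow_list);   // link via header word
  overflow_list = obj;
}

void restore_preserved_marks() {            // run after the list is drained
  while (!preserved_oops.empty()) {
    preserved_oops.back()->mark = preserved_marks.back();
    preserved_oops.pop_back();
    preserved_marks.pop_back();
  }
}

int main() {
  Obj a{PROTOTYPE_MARK};
  Obj b{0xABCD};                            // e.g. an identity hash in the header
  push_on_overflow_list(&a);
  push_on_overflow_list(&b);
  restore_preserved_marks();                // list drained; put the real headers back
  assert(b.mark == 0xABCD);                 // the displaced header is restored
  return 0;
}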


...

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,




New version:

  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
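
The roots_scanning_options accessors above implement a plain bit set over an int. A tiny self-contained illustration of the idiom; SO_Strings and SO_CodeCache are hypothetical option bits for the example, not the real HotSpot root-scanning constants:

#include <cassert>

enum RootScanningOption { SO_Strings = 1 << 0, SO_CodeCache = 1 << 1 };

struct RootsScanningOptions {
  int _roots_scanning_options = 0;
  int  roots_scanning_options() const     { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
};

int main() {
  RootsScanningOptions opts;
  opts.add_root_scanning_option(SO_Strings | SO_CodeCache);  // set two bits
  opts.remove_root_scanning_option(SO_Strings);              // clear one bit
  assert(opts.roots_scanning_options() == SO_CodeCache);     // the other remains set
  return 0;
}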


...

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
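
On the promoted() contract above: "precise marking" means that when a promoted object (typically an object array) may hold references the rescanning phase must not miss, the object is dirtied over its entire extent rather than just at its start. A stand-alone sketch of that idea against a toy card table; CARD_SIZE, the table, and dirty_for_promotion are simplified stand-ins, the real collector records this in its own structures (mod-union table and bitmap):

#include <cstddef>
#include <cstdio>

const size_t CARD_SIZE = 512;        // bytes covered per card (typical HotSpot value)
unsigned char card_table[1024];      // 0 = clean, 1 = dirty; zero-initialized

// Toy analogue of the precise-marking decision in promoted(): a plain object
// needs only the card containing its start dirtied; an object array is
// dirtied across its whole size so no reference slot escapes rescanning.
void dirty_for_promotion(size_t start, size_t obj_size_bytes, bool is_obj_array) {
  size_t end = is_obj_array ? start + obj_size_bytes : start + 1;
  for (size_t addr = start; addr < end; addr += CARD_SIZE) {
    card_table[addr / CARD_SIZE] = 1;
  }
}

int main() {
  dirty_for_promotion(0, 4 * CARD_SIZE, /*is_obj_array=*/true);   // dirties cards 0..3
  dirty_for_promotion(8 * CARD_SIZE, 4 * CARD_SIZE, false);       // dirties card 8 only
  for (int i = 0; i < 12; i++) std::printf("%d", card_table[i]);
  std::printf("\n");                                              // prints 111100001000
  return 0;
}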