< prev index next >

src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp

Print this page
rev 47957 : 8191564: Refactor GC related serviceability code into GC specific subclasses


1059  protected:
1060   // Shrink the generation by the specified number of bytes.
1061   void shrink_free_list_by(size_t bytes);
1062 
1063   // Update statistics for GC
1064   virtual void update_gc_stats(Generation* current_generation, bool full);
1065 
1066   // Maximum available space in the generation, including uncommitted
1067   // space.
1068   size_t max_available() const;
1069 
1070   // Getter and initializer for the _initiating_occupancy field.
1071   double initiating_occupancy() const { return _initiating_occupancy; }
1072   void   init_initiating_occupancy(intx io, uintx tr);
1073 
1074   void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
1075 
1076   void assert_correct_size_change_locking();
1077 
1078  public:
1079   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);
1080 
1081   // Accessors
1082   CMSCollector* collector() const { return _collector; }
1083   static void set_collector(CMSCollector* collector) {
1084     assert(_collector == NULL, "already set");
1085     _collector = collector;
1086   }
1087   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1088 
1089   Mutex* freelistLock() const;
1090 
1091   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1092 
1093   void set_did_compact(bool v) { _did_compact = v; }
1094 
1095   bool refs_discovery_is_atomic() const { return false; }
1096   bool refs_discovery_is_mt()     const {
1097     // Note: CMS does MT-discovery during the parallel-remark
1098     // phases. Use ReferenceProcessorMTMutator to make refs
1099     // discovery MT-safe during such phases or other parallel




1059  protected:
1060   // Shrink the generation by the specified number of bytes.
1061   void shrink_free_list_by(size_t bytes);
1062 
1063   // Update statistics for GC
1064   virtual void update_gc_stats(Generation* current_generation, bool full);
1065 
1066   // Maximum available space in the generation, including uncommitted
1067   // space.
1068   size_t max_available() const;
1069 
1070   // Getter and initializer for the _initiating_occupancy field.
1071   double initiating_occupancy() const { return _initiating_occupancy; }
1072   void   init_initiating_occupancy(intx io, uintx tr);
1073 
1074   void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
1075 
1076   void assert_correct_size_change_locking();
1077 
1078  public:
1079   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, GCMemoryManager* mem_mgr, CardTableRS* ct);
1080 
1081   // Accessors
1082   CMSCollector* collector() const { return _collector; }
1083   static void set_collector(CMSCollector* collector) {
1084     assert(_collector == NULL, "already set");
1085     _collector = collector;
1086   }
1087   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1088 
1089   Mutex* freelistLock() const;
1090 
1091   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1092 
1093   void set_did_compact(bool v) { _did_compact = v; }
1094 
1095   bool refs_discovery_is_atomic() const { return false; }
1096   bool refs_discovery_is_mt()     const {
1097     // Note: CMS does MT-discovery during the parallel-remark
1098     // phases. Use ReferenceProcessorMTMutator to make refs
1099     // discovery MT-safe during such phases or other parallel


< prev index next >