< prev index next >

src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

Print this page
rev 7474 : imported patch separateCardGeneration
rev 7476 : imported patch expand_for_gc_cause
rev 7477 : imported patch move_stuff_up
   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gSpaceCounters.hpp"
  30 #include "gc_implementation/shared/gcStats.hpp"
  31 #include "gc_implementation/shared/gcWhen.hpp"
  32 #include "gc_implementation/shared/generationCounters.hpp"
  33 #include "memory/cardGeneration.hpp"
  34 #include "memory/freeBlockDictionary.hpp"
  35 #include "memory/iterator.hpp"

  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/virtualspace.hpp"
  38 #include "services/memoryService.hpp"
  39 #include "utilities/bitMap.inline.hpp"
  40 #include "utilities/stack.inline.hpp"
  41 #include "utilities/taskqueue.hpp"
  42 #include "utilities/yieldingWorkgroup.hpp"
  43 
  44 // ConcurrentMarkSweepGeneration is in support of a concurrent
  45 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
  46 // style. We assume, for now, that this generation is always the
  47 // seniormost generation and for simplicity
  48 // in the first implementation, that this generation is a single compactible
  49 // space. Neither of these restrictions appears essential, and will be
  50 // relaxed in the future when more time is available to implement the
  51 // greater generality (and there's a need for it).
  52 //
  53 // Concurrent mode failures are currently handled by
  54 // means of a sliding mark-compact.
  55 


1014     size_t _numObjectsAllocated;
1015     size_t _numWordsAllocated;
1016   )
1017 
1018   // Used for sizing decisions: records whether an incremental collection failed;
1019   bool _incremental_collection_failed;
1020   bool incremental_collection_failed() {
1021     return _incremental_collection_failed;
1022   }
1023   void set_incremental_collection_failed() {
1024     _incremental_collection_failed = true;
1025   }
1026   void clear_incremental_collection_failed() {
1027     _incremental_collection_failed = false;
1028   }
1029 
1030   // Accessors for _expansion_cause, the reason the generation was last expanded.
1031   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1032   CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1033 



1034  private:
1035   // For parallel young-gen GC support.
1036   CMSParGCThreadState** _par_gc_thread_states;
1037 
1038   // Reason generation was expanded
1039   CMSExpansionCause::Cause _expansion_cause;
1040 
1041   // In support of MinChunkSize being larger than min object size
1042   const double _dilatation_factor;
1043 
1044   // True if a compacting collection was done.
1045   bool _did_compact;
1046   bool did_compact() { return _did_compact; }
1047 
1048   // Fraction of current occupancy at which to start a CMS collection which
1049   // will collect this generation (at least).
1050   double _initiating_occupancy;
1051 
1052  protected:
1053   // Shrink the free list by the specified number of bytes.
1054   void shrink_free_list_by(size_t bytes);
1055 
1056   // Update statistics for GC
1057   virtual void update_gc_stats(int level, bool full);
1058 
1059   // Maximum available space in the generation (including uncommitted)
1060   // space.
1061   size_t max_available() const;
1062 
1063   // Getter and initializer for the _initiating_occupancy field.
1064   double initiating_occupancy() const { return _initiating_occupancy; }
1065   void   init_initiating_occupancy(intx io, uintx tr);
1066 




1067  public:
1068   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1069                                 int level, CardTableRS* ct,
1070                                 bool use_adaptive_freelists,
1071                                 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
1072 
1073   // Accessors
1074   CMSCollector* collector() const { return _collector; }
1075   static void set_collector(CMSCollector* collector) {
1076     assert(_collector == NULL, "already set");
1077     _collector = collector;
1078   }
1079   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1080 
1081   Mutex* freelistLock() const;
1082 
1083   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1084 
1085   void set_did_compact(bool v) { _did_compact = v; }
1086 
1087   bool refs_discovery_is_atomic() const { return false; }
1088   bool refs_discovery_is_mt()     const {
1089     // Note: CMS does MT-discovery during the parallel-remark
1090     // phases. Use ReferenceProcessorMTMutator to make refs
1091     // discovery MT-safe during such phases or other parallel
1092     // discovery phases in the future. This may all go away
1093     // if/when we decide that refs discovery is sufficiently
1094     // rare that the cost of the CAS's involved is in the
1095     // noise. That's a measurement that should be done, and
1096     // the code simplified if that turns out to be the case.
1097     return ConcGCThreads > 1;
1098   }
1099 
1100   // Override
1101   virtual void ref_processor_init();
1102 
1103   // Grow generation by specified size (returns false if unable to grow)
1104   bool grow_by(size_t bytes);
1105   // Grow generation to reserved size.
1106   bool grow_to_reserved();
1107 
1108   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1109 
1110   // Space enquiries
1111   size_t capacity() const;
1112   size_t used() const;
1113   size_t free() const;
1114   double occupancy() const { return ((double)used())/((double)capacity()); }
1115   size_t contiguous_available() const;
1116   size_t unsafe_max_alloc_nogc() const;
1117 
1118   // over-rides
1119   MemRegion used_region() const;
1120   MemRegion used_region_at_save_marks() const;
1121 
1122   // Does a "full" (forced) collection invoked on this generation collect
1123   // all younger generations as well? Returns false when ScavengeBeforeFullGC
1124   // is set, which allows the younger generations to be collected (scavenged)
1125   // first.
1126   virtual bool full_collects_younger_generations() const {
1127     return !ScavengeBeforeFullGC;
1128   }
1129 
1130   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1131 
1132   // Support for compaction
1133   CompactibleSpace* first_compaction_space() const;
1134   // Adjust quantities in the generation affected by
1135   // the compaction.
1136   void reset_after_compaction();
1137 
1138   // Allocation support
1139   HeapWord* allocate(size_t size, bool tlab);
1140   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1141   oop       promote(oop obj, size_t obj_size);
1142   HeapWord* par_allocate(size_t size, bool tlab) {
1143     return allocate(size, tlab);
1144   }
1145 
1146 
1147   // Used by CMSStats to track direct allocation.  The value is sampled and
1148   // reset after each young gen collection.
1149   size_t direct_allocated_words() const { return _direct_allocated_words; }
1150   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1151 
1152   // Overrides for parallel promotion.
1153   virtual oop par_promote(int thread_num,


1173   HeapWord* expand_and_allocate(size_t word_size,
1174                                 bool tlab,
1175                                 bool parallel = false);
1176 
1177   // GC prologue and epilogue
1178   void gc_prologue(bool full);
1179   void gc_prologue_work(bool full, bool registerClosure,
1180                         ModUnionClosure* modUnionClosure);
1181   void gc_epilogue(bool full);
1182   void gc_epilogue_work(bool full);
1183 
1184   // Time since last GC of this generation (both calls delegate to the CMS collector).
1185   jlong time_of_last_gc(jlong now) {
1186     return collector()->time_of_last_gc(now);
1187   }
1188   void update_time_of_last_gc(jlong now) {
1189     collector()->update_time_of_last_gc(now);
1190   }
1191 
1192   // Allocation failure
1193   void expand(size_t bytes, size_t expand_bytes,
1194     CMSExpansionCause::Cause cause);
1195   virtual bool expand(size_t bytes, size_t expand_bytes);
1196   void shrink(size_t bytes);
1197   void shrink_by(size_t bytes);
1198   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1199   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1200 
1201   // Iteration support and related enquiries
1202   void save_marks();
1203   bool no_allocs_since_save_marks();
1204   void younger_refs_iterate(OopsInGenClosure* cl);
1205 
1206   // Iteration support specific to CMS generations
1207   void save_sweep_limit();
1208 
1209   // More iteration support
1210   virtual void oop_iterate(ExtendedOopClosure* cl);
1211   virtual void safe_object_iterate(ObjectClosure* cl);
1212   virtual void object_iterate(ObjectClosure* cl);
1213 
1214   // Need to declare the full complement of closures, whether we'll
1215   // override them or not, or get message from the compiler:
1216   //   oop_since_save_marks_iterate_nv hides virtual function...
1217   #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
1218     void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
1219   ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
1220 
1221   // Smart allocation  XXX -- move to CFLSpace?
1222   void setNearLargestChunk();
1223   bool isNearLargestChunk(HeapWord* addr);
1224 


   1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gSpaceCounters.hpp"
  30 #include "gc_implementation/shared/gcStats.hpp"
  31 #include "gc_implementation/shared/gcWhen.hpp"
  32 #include "gc_implementation/shared/generationCounters.hpp"
  33 #include "memory/cardGeneration.hpp"
  34 #include "memory/freeBlockDictionary.hpp"
  35 #include "memory/iterator.hpp"
  36 #include "memory/space.hpp"
  37 #include "runtime/mutexLocker.hpp"
  38 #include "runtime/virtualspace.hpp"
  39 #include "services/memoryService.hpp"
  40 #include "utilities/bitMap.inline.hpp"
  41 #include "utilities/stack.inline.hpp"
  42 #include "utilities/taskqueue.hpp"
  43 #include "utilities/yieldingWorkgroup.hpp"
  44 
  45 // ConcurrentMarkSweepGeneration is in support of a concurrent
  46 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
  47 // style. We assume, for now, that this generation is always the
  48 // seniormost generation and for simplicity
  49 // in the first implementation, that this generation is a single compactible
  50 // space. Neither of these restrictions appears essential, and will be
  51 // relaxed in the future when more time is available to implement the
  52 // greater generality (and there's a need for it).
  53 //
  54 // Concurrent mode failures are currently handled by
  55 // means of a sliding mark-compact.
  56 


1015     size_t _numObjectsAllocated;
1016     size_t _numWordsAllocated;
1017   )
1018 
1019   // Used for sizing decisions: records whether an incremental collection failed;
1020   bool _incremental_collection_failed;
1021   bool incremental_collection_failed() {
1022     return _incremental_collection_failed;
1023   }
1024   void set_incremental_collection_failed() {
1025     _incremental_collection_failed = true;
1026   }
1027   void clear_incremental_collection_failed() {
1028     _incremental_collection_failed = false;
1029   }
1030 
1031   // Accessors for _expansion_cause, the reason the generation was last expanded.
1032   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1033   CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1034 
1035   // Accessing spaces: expose the CMS space through its CompactibleSpace base.
1036   CompactibleSpace* space() const { return static_cast<CompactibleSpace*>(_cmsSpace); }
1037 
1038  private:
1039   // For parallel young-gen GC support.
1040   CMSParGCThreadState** _par_gc_thread_states;
1041 
1042   // Reason generation was expanded
1043   CMSExpansionCause::Cause _expansion_cause;
1044 
1045   // In support of MinChunkSize being larger than min object size
1046   const double _dilatation_factor;
1047 
1048   // True if a compacting collection was done.
1049   bool _did_compact;
1050   bool did_compact() { return _did_compact; }
1051 
1052   // Fraction of current occupancy at which to start a CMS collection which
1053   // will collect this generation (at least).
1054   double _initiating_occupancy;
1055 
1056  protected:
1057   // Shrink the free list by the specified number of bytes.
1058   void shrink_free_list_by(size_t bytes);
1059 
1060   // Update statistics for GC
1061   virtual void update_gc_stats(int level, bool full);
1062 
1063   // Maximum available space in the generation (including uncommitted)
1064   // space.
1065   size_t max_available() const;
1066 
1067   // Getter and initializer for the _initiating_occupancy field.
1068   double initiating_occupancy() const { return _initiating_occupancy; }
1069   void   init_initiating_occupancy(intx io, uintx tr);
1070 
   // Expand the generation, recording the given GC cause (see CMSExpansionCause).
1071   void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
1072 
   // NOTE(review): presumably asserts that the proper locks are held for a size
   // change; definition not visible here — confirm against the .cpp file.
1073   void assert_correct_size_change_locking();
1074 
1075  public:
1076   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1077                                 int level, CardTableRS* ct,
1078                                 bool use_adaptive_freelists,
1079                                 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
1080 
1081   // Accessors
1082   CMSCollector* collector() const { return _collector; }
1083   static void set_collector(CMSCollector* collector) {
1084     assert(_collector == NULL, "already set");
1085     _collector = collector;
1086   }
1087   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1088 
1089   Mutex* freelistLock() const;
1090 
1091   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1092 
1093   void set_did_compact(bool v) { _did_compact = v; }
1094 
1095   bool refs_discovery_is_atomic() const { return false; }
1096   bool refs_discovery_is_mt()     const {
1097     // Note: CMS does MT-discovery during the parallel-remark
1098     // phases. Use ReferenceProcessorMTMutator to make refs
1099     // discovery MT-safe during such phases or other parallel
1100     // discovery phases in the future. This may all go away
1101     // if/when we decide that refs discovery is sufficiently
1102     // rare that the cost of the CAS's involved is in the
1103     // noise. That's a measurement that should be done, and
1104     // the code simplified if that turns out to be the case.
1105     return ConcGCThreads > 1;
1106   }
1107 
1108   // Override
1109   virtual void ref_processor_init();
1110 





1111   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1112 
1113   // Space enquiries



1114   double occupancy() const { return ((double)used())/((double)capacity()); }
1115   size_t contiguous_available() const;
1116   size_t unsafe_max_alloc_nogc() const;
1117 
1118   // over-rides

1119   MemRegion used_region_at_save_marks() const;
1120 
1121   // Does a "full" (forced) collection invoked on this generation collect
1122   // all younger generations as well? Returns false when ScavengeBeforeFullGC
1123   // is set, which allows the younger generations to be collected (scavenged)
1124   // first.
1125   virtual bool full_collects_younger_generations() const {
1126     return !ScavengeBeforeFullGC;
1127   }
1128 




1129   // Adjust quantities in the generation affected by
1130   // the compaction.
1131   void reset_after_compaction();
1132 
1133   // Allocation support
1134   HeapWord* allocate(size_t size, bool tlab);
1135   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1136   oop       promote(oop obj, size_t obj_size);
1137   HeapWord* par_allocate(size_t size, bool tlab) {
1138     return allocate(size, tlab);
1139   }
1140 
1141 
1142   // Used by CMSStats to track direct allocation.  The value is sampled and
1143   // reset after each young gen collection.
1144   size_t direct_allocated_words() const { return _direct_allocated_words; }
1145   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1146 
1147   // Overrides for parallel promotion.
1148   virtual oop par_promote(int thread_num,


1168   HeapWord* expand_and_allocate(size_t word_size,
1169                                 bool tlab,
1170                                 bool parallel = false);
1171 
1172   // GC prologue and epilogue
1173   void gc_prologue(bool full);
1174   void gc_prologue_work(bool full, bool registerClosure,
1175                         ModUnionClosure* modUnionClosure);
1176   void gc_epilogue(bool full);
1177   void gc_epilogue_work(bool full);
1178 
1179   // Time since last GC of this generation (both calls delegate to the CMS collector).
1180   jlong time_of_last_gc(jlong now) {
1181     return collector()->time_of_last_gc(now);
1182   }
1183   void update_time_of_last_gc(jlong now) {
1184     collector()->update_time_of_last_gc(now);
1185   }
1186 
1187   // Allocation failure



1188   void shrink(size_t bytes);

1189   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1190   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1191 
1192   // Iteration support and related enquiries
1193   void save_marks();
1194   bool no_allocs_since_save_marks();

1195 
1196   // Iteration support specific to CMS generations
1197   void save_sweep_limit();
1198 
1199   // More iteration support
1200   virtual void oop_iterate(ExtendedOopClosure* cl);
1201   virtual void safe_object_iterate(ObjectClosure* cl);
1202   virtual void object_iterate(ObjectClosure* cl);
1203 
1204   // Need to declare the full complement of closures, whether we'll
1205   // override them or not, or get message from the compiler:
1206   //   oop_since_save_marks_iterate_nv hides virtual function...
1207   #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
1208     void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
1209   ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
1210 
1211   // Smart allocation  XXX -- move to CFLSpace?
1212   void setNearLargestChunk();
1213   bool isNearLargestChunk(HeapWord* addr);
1214 


< prev index next >