18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
26 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
27
28 #include "gc/cms/cmsOopClosures.hpp"
29 #include "gc/cms/gSpaceCounters.hpp"
30 #include "gc/cms/yieldingWorkgroup.hpp"
31 #include "gc/shared/cardGeneration.hpp"
32 #include "gc/shared/gcHeapSummary.hpp"
33 #include "gc/shared/gcStats.hpp"
34 #include "gc/shared/gcWhen.hpp"
35 #include "gc/shared/generationCounters.hpp"
36 #include "gc/shared/space.hpp"
37 #include "gc/shared/taskqueue.hpp"
38 #include "memory/freeBlockDictionary.hpp"
39 #include "memory/iterator.hpp"
40 #include "memory/virtualspace.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "services/memoryService.hpp"
43 #include "utilities/bitMap.hpp"
44 #include "utilities/stack.hpp"
45
46 // ConcurrentMarkSweepGeneration is in support of a concurrent
47 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
48 // style. We assume, for now, that this generation is always the
49 // seniormost generation and for simplicity
50 // in the first implementation, that this generation is a single compactible
51 // space. Neither of these restrictions appears essential, and will be
52 // relaxed in the future when more time is available to implement the
53 // greater generality (and there's a need for it).
54 //
55 // Concurrent mode failures are currently handled by
56 // means of a sliding mark-compact.
57
291 HeapWord** array() { return _array; }  // raw access to the sample buffer
292 void set_array(HeapWord** a) { _array = a; }  // install the buffer to record samples into
293
294 size_t capacity() { return _capacity; }  // maximum number of samples the buffer can hold
295 void set_capacity(size_t c) { _capacity = c; }  // caller must keep this consistent with the installed buffer's size
296
297 size_t end() {  // number of recorded samples; exclusive upper bound for nth()
298 assert(_index <= capacity(),
299 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
300 _index, _capacity);
301 return _index;
302 } // exclusive
303
304 HeapWord* nth(size_t n) {  // n-th recorded sample; requires 0 <= n < end()
305 assert(n < end(), "Out of bounds access");
306 return _array[n];
307 }
308
309 void reset() {  // clear the sample count for reuse; report overflow statistics first
310 _index = 0;
311 if (_overflows > 0 && PrintCMSStatistics > 1) {  // only warn when verbose CMS statistics were requested
312 warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
313 _capacity, _overflows);
314 }
315 _overflows = 0;
316 }
317
318 void record_sample(HeapWord* p, size_t sz) {  // append p if there is room; otherwise just count the overflow
319 // For now we do not do anything with the size
320 if (_index < _capacity) {
321 _array[_index++] = p;
322 } else {
323 ++_overflows;  // sample dropped; remember how often this happened (reported by reset())
324 assert(_index == _capacity,
325 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
326 "): out of bounds at overflow#" SIZE_FORMAT,
327 _index, _capacity, _overflows);
328 }
329 }
330 };
331
332 //
333 // Timing, allocation and promotion statistics for gc scheduling and incremental
434
435 // Returns bytes directly allocated per second of wall clock time.
436 double cms_allocation_rate() const;
437
438 // Rate at which space in the cms generation is being consumed (sum of the
439 // above two).
440 double cms_consumption_rate() const;
441
442 // Returns an estimate of the number of seconds until the cms generation will
443 // fill up, assuming no collection work is done.
444 double time_until_cms_gen_full() const;
445
446 // Returns an estimate of the number of seconds remaining until
447 // the cms generation collection should start.
448 double time_until_cms_start() const;
449
450 // End of higher level statistics.
451
452 // Debugging.
453 void print_on(outputStream* st) const PRODUCT_RETURN;
454 void print() const { print_on(gclog_or_tty); }  // convenience overload; writes to the GC log / tty stream
455 };
456
457 // A closure related to weak references processing which
458 // we embed in the CMSCollector, since we need to pass
459 // it to the reference processor for secondary filtering
460 // of references based on reachability of referent;
461 // see role of _is_alive_non_header closure in the
462 // ReferenceProcessor class.
463 // For objects in the CMS generation, this closure checks
464 // if the object is "live" (reachable). Used in weak
465 // reference processing.
466 class CMSIsAliveClosure: public BoolObjectClosure {
467 const MemRegion _span;
468 const CMSBitMap* _bit_map;
469
470 friend class CMSCollector;
471 public:
472 CMSIsAliveClosure(MemRegion span,
473 CMSBitMap* bit_map):
474 _span(span),
909
910 // Support for parallel remark of survivor space
911 void* get_data_recorder(int thr_num);
912 void sample_eden_chunk();
913
914 CMSBitMap* markBitMap() { return &_markBitMap; }
915 void directAllocated(HeapWord* start, size_t size);
916
917 // Main CMS steps and related support
918 void checkpointRootsInitial();
919 bool markFromRoots(); // a return value of false indicates failure
920 // due to stack overflow
921 void preclean();
922 void checkpointRootsFinal();
923 void sweep();
924
925 // Check that the currently executing thread is the expected
926 // one (foreground collector or background collector).
927 static void check_correct_thread_executing() PRODUCT_RETURN;
928
929 bool is_cms_reachable(HeapWord* addr);
930
931 // Performance Counter Support
932 CollectorCounters* counters() { return _gc_counters; }
933
934 // Timer stuff
935 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }  // timer must currently be stopped
936 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }  // timer must currently be running
937 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }  // clear accumulated time; timer must be stopped
938 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }  // accumulated time, in seconds; timer must be stopped
939
940 int yields() { return _numYields; }
941 void resetYields() { _numYields = 0; }
942 void incrementYields() { _numYields++; }
943 void resetNumDirtyCards() { _numDirtyCards = 0; }
944 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
945 size_t numDirtyCards() { return _numDirtyCards; }
946
947 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
948 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
949 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
950 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
951 size_t sweep_count() const { return _sweep_count; }
952 void increment_sweep_count() { _sweep_count++; }
953
954 // Timers/stats for gc scheduling and incremental mode pacing.
955 CMSStats& stats() { return _stats; }
956
957 // Adaptive size policy
958 AdaptiveSizePolicy* size_policy();
959
960 static void print_on_error(outputStream* st);
961
962 // Debugging
963 void verify();
964 bool verify_after_remark(bool silent = VerifySilently);
965 void verify_ok_to_terminate() const PRODUCT_RETURN;
966 void verify_work_stacks_empty() const PRODUCT_RETURN;
967 void verify_overflow_empty() const PRODUCT_RETURN;
968
969 // Convenience methods in support of debugging
970 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
971 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
972
973 // Accessors
974 CMSMarkStack* verification_mark_stack() { return &_markStack; }
975 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
976
977 // Initialization errors
978 bool completed_initialization() { return _completed_initialization; }
979
980 void print_eden_and_survivor_chunk_arrays();
981 };
982
983 class CMSExpansionCause : public AllStatic {
984 public:
1217 // Performance Counters support
1218 virtual void update_counters();
1219 virtual void update_counters(size_t used);
1220 void initialize_performance_counters();
1221 CollectorCounters* counters() { return collector()->counters(); }  // performance counters, delegated to the CMS collector
1222
1223 // Support for parallel remark of survivor space
1224 void* get_data_recorder(int thr_num) {  // per-thread data recorder for parallel remark of survivor space
1225 // Delegate to the collector.
1226 return collector()->get_data_recorder(thr_num);
1227 }
1228 void sample_eden_chunk() {  // delegate eden-chunk sampling to the collector
1229 // Delegate to the collector.
1230 return collector()->sample_eden_chunk();
1231 }
1232
1233 // Printing
1234 const char* name() const;
1235 virtual const char* short_name() const { return "CMS"; }  // abbreviated generation name used in logs/printouts
1236 void print() const;
1237 void printOccupancy(const char* s);
1238
1239 // Resize the generation after a compacting GC. The
1240 // generation can be treated as a contiguous space
1241 // after the compaction.
1242 virtual void compute_new_size();
1243 // Resize the generation after a non-compacting
1244 // collection.
1245 void compute_new_size_free_list();
1246 };
1247
1248 //
1249 // Closures of various sorts used by CMS to accomplish its work
1250 //
1251
1252 // This closure is used to do concurrent marking from the roots
1253 // following the first checkpoint.
1254 class MarkFromRootsClosure: public BitMapClosure {
1255 CMSCollector* _collector;
1256 MemRegion _span;
1257 CMSBitMap* _bitMap;
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
26 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
27
28 #include "gc/cms/cmsOopClosures.hpp"
29 #include "gc/cms/gSpaceCounters.hpp"
30 #include "gc/cms/yieldingWorkgroup.hpp"
31 #include "gc/shared/cardGeneration.hpp"
32 #include "gc/shared/gcHeapSummary.hpp"
33 #include "gc/shared/gcStats.hpp"
34 #include "gc/shared/gcWhen.hpp"
35 #include "gc/shared/generationCounters.hpp"
36 #include "gc/shared/space.hpp"
37 #include "gc/shared/taskqueue.hpp"
38 #include "logging/log.hpp"
39 #include "memory/freeBlockDictionary.hpp"
40 #include "memory/iterator.hpp"
41 #include "memory/virtualspace.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "services/memoryService.hpp"
44 #include "utilities/bitMap.hpp"
45 #include "utilities/stack.hpp"
46
47 // ConcurrentMarkSweepGeneration is in support of a concurrent
48 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
49 // style. We assume, for now, that this generation is always the
50 // seniormost generation and for simplicity
51 // in the first implementation, that this generation is a single compactible
52 // space. Neither of these restrictions appears essential, and will be
53 // relaxed in the future when more time is available to implement the
54 // greater generality (and there's a need for it).
55 //
56 // Concurrent mode failures are currently handled by
57 // means of a sliding mark-compact.
58
292 HeapWord** array() { return _array; }  // raw access to the sample buffer
293 void set_array(HeapWord** a) { _array = a; }  // install the buffer to record samples into
294
295 size_t capacity() { return _capacity; }  // maximum number of samples the buffer can hold
296 void set_capacity(size_t c) { _capacity = c; }  // caller must keep this consistent with the installed buffer's size
297
298 size_t end() {  // number of recorded samples; exclusive upper bound for nth()
299 assert(_index <= capacity(),
300 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
301 _index, _capacity);
302 return _index;
303 } // exclusive
304
305 HeapWord* nth(size_t n) {  // n-th recorded sample; requires 0 <= n < end()
306 assert(n < end(), "Out of bounds access");
307 return _array[n];
308 }
309
310 void reset() {  // clear the sample count for reuse; log overflow statistics first
311 _index = 0;
312 if (_overflows > 0) {  // report via unified logging how often record_sample() ran out of room
313 log_trace(gc, stats)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows);
314 }
315 _overflows = 0;
316 }
317
318 void record_sample(HeapWord* p, size_t sz) {  // append p if there is room; otherwise just count the overflow
319 // For now we do not do anything with the size
320 if (_index < _capacity) {
321 _array[_index++] = p;
322 } else {
323 ++_overflows;  // sample dropped; remember how often this happened (reported by reset())
324 assert(_index == _capacity,
325 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
326 "): out of bounds at overflow#" SIZE_FORMAT,
327 _index, _capacity, _overflows);
328 }
329 }
330 };
331
332 //
333 // Timing, allocation and promotion statistics for gc scheduling and incremental
434
435 // Returns bytes directly allocated per second of wall clock time.
436 double cms_allocation_rate() const;
437
438 // Rate at which space in the cms generation is being consumed (sum of the
439 // above two).
440 double cms_consumption_rate() const;
441
442 // Returns an estimate of the number of seconds until the cms generation will
443 // fill up, assuming no collection work is done.
444 double time_until_cms_gen_full() const;
445
446 // Returns an estimate of the number of seconds remaining until
447 // the cms generation collection should start.
448 double time_until_cms_start() const;
449
450 // End of higher level statistics.
451
452 // Debugging.
453 void print_on(outputStream* st) const PRODUCT_RETURN;
454 void print() const { print_on(tty); }  // convenience overload; writes to tty
455 };
456
457 // A closure related to weak references processing which
458 // we embed in the CMSCollector, since we need to pass
459 // it to the reference processor for secondary filtering
460 // of references based on reachability of referent;
461 // see role of _is_alive_non_header closure in the
462 // ReferenceProcessor class.
463 // For objects in the CMS generation, this closure checks
464 // if the object is "live" (reachable). Used in weak
465 // reference processing.
466 class CMSIsAliveClosure: public BoolObjectClosure {
467 const MemRegion _span;
468 const CMSBitMap* _bit_map;
469
470 friend class CMSCollector;
471 public:
472 CMSIsAliveClosure(MemRegion span,
473 CMSBitMap* bit_map):
474 _span(span),
909
910 // Support for parallel remark of survivor space
911 void* get_data_recorder(int thr_num);
912 void sample_eden_chunk();
913
914 CMSBitMap* markBitMap() { return &_markBitMap; }
915 void directAllocated(HeapWord* start, size_t size);
916
917 // Main CMS steps and related support
918 void checkpointRootsInitial();
919 bool markFromRoots(); // a return value of false indicates failure
920 // due to stack overflow
921 void preclean();
922 void checkpointRootsFinal();
923 void sweep();
924
925 // Check that the currently executing thread is the expected
926 // one (foreground collector or background collector).
927 static void check_correct_thread_executing() PRODUCT_RETURN;
928
929 // Performance Counter Support
930 CollectorCounters* counters() { return _gc_counters; }
931
932 // Timer stuff
933 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }  // timer must currently be stopped
934 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }  // timer must currently be running
935 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }  // clear accumulated time; timer must be stopped
936 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds() * 1000; }
937
938 int yields() { return _numYields; }
939 void resetYields() { _numYields = 0; }
940 void incrementYields() { _numYields++; }
941 void resetNumDirtyCards() { _numDirtyCards = 0; }
942 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
943 size_t numDirtyCards() { return _numDirtyCards; }
944
945 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
946 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
947 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
948 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
949 size_t sweep_count() const { return _sweep_count; }
950 void increment_sweep_count() { _sweep_count++; }
951
952 // Timers/stats for gc scheduling and incremental mode pacing.
953 CMSStats& stats() { return _stats; }
954
955 // Adaptive size policy
956 AdaptiveSizePolicy* size_policy();
957
958 static void print_on_error(outputStream* st);
959
960 // Debugging
961 void verify();
962 bool verify_after_remark();
963 void verify_ok_to_terminate() const PRODUCT_RETURN;
964 void verify_work_stacks_empty() const PRODUCT_RETURN;
965 void verify_overflow_empty() const PRODUCT_RETURN;
966
967 // Convenience methods in support of debugging
968 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
969 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
970
971 // Accessors
972 CMSMarkStack* verification_mark_stack() { return &_markStack; }
973 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
974
975 // Initialization errors
976 bool completed_initialization() { return _completed_initialization; }
977
978 void print_eden_and_survivor_chunk_arrays();
979 };
980
981 class CMSExpansionCause : public AllStatic {
982 public:
1215 // Performance Counters support
1216 virtual void update_counters();
1217 virtual void update_counters(size_t used);
1218 void initialize_performance_counters();
1219 CollectorCounters* counters() { return collector()->counters(); }  // performance counters, delegated to the CMS collector
1220
1221 // Support for parallel remark of survivor space
1222 void* get_data_recorder(int thr_num) {  // per-thread data recorder for parallel remark of survivor space
1223 // Delegate to the collector.
1224 return collector()->get_data_recorder(thr_num);
1225 }
1226 void sample_eden_chunk() {  // delegate eden-chunk sampling to the collector
1227 // Delegate to the collector.
1228 return collector()->sample_eden_chunk();
1229 }
1230
1231 // Printing
1232 const char* name() const;
1233 virtual const char* short_name() const { return "CMS"; }  // abbreviated generation name used in logs/printouts
1234 void print() const;
1235
1236 // Resize the generation after a compacting GC. The
1237 // generation can be treated as a contiguous space
1238 // after the compaction.
1239 virtual void compute_new_size();
1240 // Resize the generation after a non-compacting
1241 // collection.
1242 void compute_new_size_free_list();
1243 };
1244
1245 //
1246 // Closures of various sorts used by CMS to accomplish its work
1247 //
1248
1249 // This closure is used to do concurrent marking from the roots
1250 // following the first checkpoint.
1251 class MarkFromRootsClosure: public BitMapClosure {
1252 CMSCollector* _collector;
1253 MemRegion _span;
1254 CMSBitMap* _bitMap;
|