// NOTE(review): each line below carries its original source line number --
// this is a numbered listing pane, not directly compilable text. The code is
// the interior of the CMS collector class; the enclosing class header is
// outside this view.
716 enum CMS_op_type {
717 CMS_op_checkpointRootsInitial,
718 CMS_op_checkpointRootsFinal
719 };
720
// Entry points for the two CMS stop-the-world checkpoint operations
// (initial mark and final remark), dispatched on CMS_op_type.
721 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
722 bool stop_world_and_do(CMS_op_type op);
723
// Accessors for the parallel-marking infrastructure: per-thread oop task
// queues, per-thread hash seeds, and the concurrent worker gang.
724 OopTaskQueueSet* task_queues() { return _task_queues; }
725 int* hash_seed(int i) { return &_hash_seed[i]; }
726 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
727
728 // Support for parallelizing Eden rescan in CMS remark phase
729 void sample_eden(); // ... sample Eden space top
730
731 private:
732 // Support for parallelizing young gen rescan in CMS remark phase
733 Generation* _young_gen; // the younger gen
734 HeapWord** _top_addr; // ... Top of Eden
735 HeapWord** _end_addr; // ... End of Eden
736 HeapWord** _eden_chunk_array; // ... Eden partitioning array
737 size_t _eden_chunk_index; // ... top (exclusive) of array
738 size_t _eden_chunk_capacity; // ... max entries in array
739
740 // Support for parallelizing survivor space rescan
741 HeapWord** _survivor_chunk_array;
742 size_t _survivor_chunk_index;
743 size_t _survivor_chunk_capacity;
744 size_t* _cursor;
745 ChunkArray* _survivor_plab_array;
746
747 // Support for marking stack overflow handling
// take_from_overflow_list / par_take_from_overflow_list return false to
// report failure (see the boolean return type); exact failure condition is
// defined in the implementation, which is not visible here.
748 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
749 bool par_take_from_overflow_list(size_t num,
750 OopTaskQueue* to_work_q,
751 int no_of_gc_threads);
752 void push_on_overflow_list(oop p);
753 void par_push_on_overflow_list(oop p);
754 // the following is, obviously, not, in general, "MT-stable"
755 bool overflow_list_is_empty() const;
// NOTE(review): listing lines 756-912 are elided between the previous span
// and this one.
913
914 // Adjust size of underlying generation
915 void compute_new_size();
916
917 // GC prologue and epilogue
918 void gc_prologue(bool full);
919 void gc_epilogue(bool full);
920
// Report the time of the last completed GC. While a collection is in
// progress (_collectorState beyond Idling) the supplied current time is
// returned instead, since the "last GC" has not yet finished.
921 jlong time_of_last_gc(jlong now) {
922 if (_collectorState <= Idling) {
923 // gc not in progress
924 return _time_of_last_gc;
925 } else {
926 // collection in progress
927 return now;
928 }
929 }
930
931 // Support for parallel remark of survivor space
932 void* get_data_recorder(int thr_num);
933
// Accessor for the main CMS marking bit map; directAllocated presumably
// records a block allocated directly in this generation -- confirm against
// the implementation file.
934 CMSBitMap* markBitMap() { return &_markBitMap; }
935 void directAllocated(HeapWord* start, size_t size);
936
937 // main CMS steps and related support
938 void checkpointRootsInitial(bool asynch);
939 bool markFromRoots(bool asynch); // a return value of false indicates failure
940 // due to stack overflow
941 void preclean();
942 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
943 bool init_mark_was_synchronous);
944 void sweep(bool asynch);
945
946 // Check that the currently executing thread is the expected
947 // one (foreground collector or background collector).
948 static void check_correct_thread_executing() PRODUCT_RETURN;
949 // XXXPERM void print_statistics() PRODUCT_RETURN;
950
951 bool is_cms_reachable(HeapWord* addr);
952
// NOTE(review): listing lines 953-989 are elided before this span.
990
991 static void print_on_error(outputStream* st);
992
993 // debugging
// Methods suffixed PRODUCT_RETURN / PRODUCT_RETURN0 follow the HotSpot
// convention of expanding to empty (or zero-returning) bodies in product
// builds, so these checks exist only in debug builds.
994 void verify();
995 bool verify_after_remark(bool silent = VerifySilently);
996 void verify_ok_to_terminate() const PRODUCT_RETURN;
997 void verify_work_stacks_empty() const PRODUCT_RETURN;
998 void verify_overflow_empty() const PRODUCT_RETURN;
999
1000 // convenience methods in support of debugging
1001 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1002 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1003
1004 // accessors
1005 CMSMarkStack* verification_mark_stack() { return &_markStack; }
1006 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
1007
1008 // Initialization errors
// True once the collector finished initializing successfully.
1009 bool completed_initialization() { return _completed_initialization; }
1010 };
1011
// AllStatic holder enumerating the possible reasons the CMS generation was
// expanded; used for reporting (see to_string below).
1012 class CMSExpansionCause : public AllStatic {
1013 public:
1014 enum Cause {
1015 _no_expansion,
1016 _satisfy_free_ratio,
1017 _satisfy_promotion,
1018 _satisfy_allocation,
1019 _allocate_par_lab,
1020 _allocate_par_spooling_space,
1021 _adaptive_size_policy
1022 };
1023 // Return a string describing the cause of the expansion.
1024 static const char* to_string(CMSExpansionCause::Cause cause);
1025 };
1026
1027 class ConcurrentMarkSweepGeneration: public CardGeneration {
1028 friend class VMStructs;
1029 friend class ConcurrentMarkSweepThread;
// NOTE(review): listing lines 1030-1279 are elided; the declarations below
// resume mid-class, so this class definition is incomplete in this view.
1280 // the space.
1281 FreeChunk* find_chunk_at_end();
1282
1283 void post_compact();
1284
1285 // Debugging
1286 void prepare_for_verify();
1287 void verify();
1288 void print_statistics() PRODUCT_RETURN;
1289
1290 // Performance Counters support
1291 virtual void update_counters();
1292 virtual void update_counters(size_t used);
1293 void initialize_performance_counters();
1294 CollectorCounters* counters() { return collector()->counters(); }
1295
1296 // Support for parallel remark of survivor space
// Thin delegation to the associated collector's recorder.
1297 void* get_data_recorder(int thr_num) {
1298 //Delegate to collector
1299 return collector()->get_data_recorder(thr_num);
1300 }
1301
1302 // Printing
1303 const char* name() const;
1304 virtual const char* short_name() const { return "CMS"; }
1305 void print() const;
1306 void printOccupancy(const char* s);
// This generation is always the oldest in the heap, never the youngest.
1307 bool must_be_youngest() const { return false; }
1308 bool must_be_oldest() const { return true; }
1309
1310 // Resize the generation after a compacting GC. The
1311 // generation can be treated as a contiguous space
1312 // after the compaction.
1313 virtual void compute_new_size();
1314 // Resize the generation after a non-compacting
1315 // collection.
1316 void compute_new_size_free_list();
1317
1318 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1319 void rotate_debug_collection_type();
|
// NOTE(review): second listing pane (revised version of the same header);
// each line again carries its original line number. Relative to the first
// pane this revision adds _eden_chunk_lock below.
716 enum CMS_op_type {
717 CMS_op_checkpointRootsInitial,
718 CMS_op_checkpointRootsFinal
719 };
720
// Entry points for the two CMS stop-the-world checkpoint operations
// (initial mark and final remark), dispatched on CMS_op_type.
721 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
722 bool stop_world_and_do(CMS_op_type op);
723
// Accessors for the parallel-marking infrastructure: per-thread oop task
// queues, per-thread hash seeds, and the concurrent worker gang.
724 OopTaskQueueSet* task_queues() { return _task_queues; }
725 int* hash_seed(int i) { return &_hash_seed[i]; }
726 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
727
728 // Support for parallelizing Eden rescan in CMS remark phase
729 void sample_eden(); // ... sample Eden space top
730
731 private:
732 // Support for parallelizing young gen rescan in CMS remark phase
733 Generation* _young_gen; // the younger gen
734 HeapWord** _top_addr; // ... Top of Eden
735 HeapWord** _end_addr; // ... End of Eden
// NOTE(review): new in this revision -- presumably guards the Eden chunk
// sampling state below (array/index); confirm usage in the .cpp file.
736 Mutex* _eden_chunk_lock;
737 HeapWord** _eden_chunk_array; // ... Eden partitioning array
738 size_t _eden_chunk_index; // ... top (exclusive) of array
739 size_t _eden_chunk_capacity; // ... max entries in array
740
741 // Support for parallelizing survivor space rescan
742 HeapWord** _survivor_chunk_array;
743 size_t _survivor_chunk_index;
744 size_t _survivor_chunk_capacity;
745 size_t* _cursor;
746 ChunkArray* _survivor_plab_array;
747
748 // Support for marking stack overflow handling
// take_from_overflow_list / par_take_from_overflow_list return false to
// report failure (see the boolean return type); exact failure condition is
// defined in the implementation, which is not visible here.
749 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
750 bool par_take_from_overflow_list(size_t num,
751 OopTaskQueue* to_work_q,
752 int no_of_gc_threads);
753 void push_on_overflow_list(oop p);
754 void par_push_on_overflow_list(oop p);
755 // the following is, obviously, not, in general, "MT-stable"
756 bool overflow_list_is_empty() const;
// NOTE(review): listing lines 757-913 are elided between the previous span
// and this one.
914
915 // Adjust size of underlying generation
916 void compute_new_size();
917
918 // GC prologue and epilogue
919 void gc_prologue(bool full);
920 void gc_epilogue(bool full);
921
// Report the time of the last completed GC. While a collection is in
// progress (_collectorState beyond Idling) the supplied current time is
// returned instead, since the "last GC" has not yet finished.
922 jlong time_of_last_gc(jlong now) {
923 if (_collectorState <= Idling) {
924 // gc not in progress
925 return _time_of_last_gc;
926 } else {
927 // collection in progress
928 return now;
929 }
930 }
931
932 // Support for parallel remark of survivor space
// sample_eden_chunk is new in this revision; it complements sample_eden
// for Eden chunk sampling (semantics defined in the .cpp, not visible here).
933 void* get_data_recorder(int thr_num);
934 void sample_eden_chunk();
935
// Accessor for the main CMS marking bit map; directAllocated presumably
// records a block allocated directly in this generation -- confirm against
// the implementation file.
936 CMSBitMap* markBitMap() { return &_markBitMap; }
937 void directAllocated(HeapWord* start, size_t size);
938
939 // main CMS steps and related support
940 void checkpointRootsInitial(bool asynch);
941 bool markFromRoots(bool asynch); // a return value of false indicates failure
942 // due to stack overflow
943 void preclean();
944 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
945 bool init_mark_was_synchronous);
946 void sweep(bool asynch);
947
948 // Check that the currently executing thread is the expected
949 // one (foreground collector or background collector).
950 static void check_correct_thread_executing() PRODUCT_RETURN;
951 // XXXPERM void print_statistics() PRODUCT_RETURN;
952
953 bool is_cms_reachable(HeapWord* addr);
954
// NOTE(review): listing lines 955-991 are elided before this span.
992
993 static void print_on_error(outputStream* st);
994
995 // debugging
// Methods suffixed PRODUCT_RETURN / PRODUCT_RETURN0 follow the HotSpot
// convention of expanding to empty (or zero-returning) bodies in product
// builds, so these checks exist only in debug builds.
996 void verify();
997 bool verify_after_remark(bool silent = VerifySilently);
998 void verify_ok_to_terminate() const PRODUCT_RETURN;
999 void verify_work_stacks_empty() const PRODUCT_RETURN;
1000 void verify_overflow_empty() const PRODUCT_RETURN;
1001
1002 // convenience methods in support of debugging
1003 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1004 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1005
1006 // accessors
1007 CMSMarkStack* verification_mark_stack() { return &_markStack; }
1008 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
1009
1010 // Initialization errors
// True once the collector finished initializing successfully.
1011 bool completed_initialization() { return _completed_initialization; }
1012
// New debug/print helper in this revision, paired with the Eden/survivor
// chunk arrays declared earlier in the class.
1013 void print_eden_and_survivor_chunk_arrays();
1014 };
1015
// AllStatic holder enumerating the possible reasons the CMS generation was
// expanded; used for reporting (see to_string below).
1016 class CMSExpansionCause : public AllStatic {
1017 public:
1018 enum Cause {
1019 _no_expansion,
1020 _satisfy_free_ratio,
1021 _satisfy_promotion,
1022 _satisfy_allocation,
1023 _allocate_par_lab,
1024 _allocate_par_spooling_space,
1025 _adaptive_size_policy
1026 };
1027 // Return a string describing the cause of the expansion.
1028 static const char* to_string(CMSExpansionCause::Cause cause);
1029 };
1030
1031 class ConcurrentMarkSweepGeneration: public CardGeneration {
1032 friend class VMStructs;
1033 friend class ConcurrentMarkSweepThread;
// NOTE(review): listing lines 1034-1283 are elided; the declarations below
// resume mid-class, so this class definition is incomplete in this view.
1284 // the space.
1285 FreeChunk* find_chunk_at_end();
1286
1287 void post_compact();
1288
1289 // Debugging
1290 void prepare_for_verify();
1291 void verify();
1292 void print_statistics() PRODUCT_RETURN;
1293
1294 // Performance Counters support
1295 virtual void update_counters();
1296 virtual void update_counters(size_t used);
1297 void initialize_performance_counters();
1298 CollectorCounters* counters() { return collector()->counters(); }
1299
1300 // Support for parallel remark of survivor space
// Thin delegations to the associated collector; sample_eden_chunk is new
// in this revision and mirrors the collector-side method of the same name.
1301 void* get_data_recorder(int thr_num) {
1302 //Delegate to collector
1303 return collector()->get_data_recorder(thr_num);
1304 }
1305 void sample_eden_chunk() {
1306 //Delegate to collector
1307 return collector()->sample_eden_chunk();
1308 }
1309
1310 // Printing
1311 const char* name() const;
1312 virtual const char* short_name() const { return "CMS"; }
1313 void print() const;
1314 void printOccupancy(const char* s);
// This generation is always the oldest in the heap, never the youngest.
1315 bool must_be_youngest() const { return false; }
1316 bool must_be_oldest() const { return true; }
1317
1318 // Resize the generation after a compacting GC. The
1319 // generation can be treated as a contiguous space
1320 // after the compaction.
1321 virtual void compute_new_size();
1322 // Resize the generation after a non-compacting
1323 // collection.
1324 void compute_new_size_free_list();
1325
1326 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1327 void rotate_debug_collection_type();
|