719 };
720
721 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
722 bool stop_world_and_do(CMS_op_type op);
723
724   OopTaskQueueSet* task_queues() { return _task_queues; } // accessor: the set of per-worker oop task queues
725   int* hash_seed(int i) { return &_hash_seed[i]; } // accessor: seed slot for worker i (address returned so callers can update it)
726   YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } // accessor: gang of concurrent worker threads
727
728   // Support for parallelizing Eden rescan in CMS remark phase
729   void sample_eden(); // ... sample Eden space top
730
731 private:
732 // Support for parallelizing young gen rescan in CMS remark phase
733 Generation* _young_gen; // the younger gen
734 HeapWord** _top_addr; // ... Top of Eden
735 HeapWord** _end_addr; // ... End of Eden
736 HeapWord** _eden_chunk_array; // ... Eden partitioning array
737 size_t _eden_chunk_index; // ... top (exclusive) of array
738 size_t _eden_chunk_capacity; // ... max entries in array
739
740 // Support for parallelizing survivor space rescan
741 HeapWord** _survivor_chunk_array;
742 size_t _survivor_chunk_index;
743 size_t _survivor_chunk_capacity;
744 size_t* _cursor;
745 ChunkArray* _survivor_plab_array;
746
747 // Support for marking stack overflow handling
748 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
749 bool par_take_from_overflow_list(size_t num,
750 OopTaskQueue* to_work_q,
751 int no_of_gc_threads);
752 void push_on_overflow_list(oop p);
753 void par_push_on_overflow_list(oop p);
754 // the following is, obviously, not, in general, "MT-stable"
755 bool overflow_list_is_empty() const;
756
757 void preserve_mark_if_necessary(oop p);
758 void par_preserve_mark_if_necessary(oop p);
913
914 // Adjust size of underlying generation
915 void compute_new_size();
916
917 // GC prologue and epilogue
918 void gc_prologue(bool full);
919 void gc_epilogue(bool full);
920
921   jlong time_of_last_gc(jlong now) { // report the last-GC time stamp, or 'now' while a collection is in progress
922     if (_collectorState <= Idling) {
923       // gc not in progress
924       return _time_of_last_gc;
925     } else {
926       // collection in progress
927       return now;
928     }
929   }
930
931 // Support for parallel remark of survivor space
932 void* get_data_recorder(int thr_num);
933
934 CMSBitMap* markBitMap() { return &_markBitMap; }
935 void directAllocated(HeapWord* start, size_t size);
936
937 // main CMS steps and related support
938 void checkpointRootsInitial(bool asynch);
939 bool markFromRoots(bool asynch); // a return value of false indicates failure
940 // due to stack overflow
941 void preclean();
942 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
943 bool init_mark_was_synchronous);
944 void sweep(bool asynch);
945
946 // Check that the currently executing thread is the expected
947 // one (foreground collector or background collector).
948 static void check_correct_thread_executing() PRODUCT_RETURN;
949 // XXXPERM void print_statistics() PRODUCT_RETURN;
950
951 bool is_cms_reachable(HeapWord* addr);
952
990
991 static void print_on_error(outputStream* st);
992
993 // debugging
994 void verify();
995 bool verify_after_remark(bool silent = VerifySilently);
996 void verify_ok_to_terminate() const PRODUCT_RETURN;
997 void verify_work_stacks_empty() const PRODUCT_RETURN;
998 void verify_overflow_empty() const PRODUCT_RETURN;
999
1000 // convenience methods in support of debugging
1001 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1002 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1003
1004   // accessors
1005   CMSMarkStack* verification_mark_stack() { return &_markStack; } // mark stack reused for verification passes
1006   CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } // bitmap used by verify_after_remark()
1007
1008   // Initialization errors
1009   bool completed_initialization() { return _completed_initialization; } // true once collector initialization fully completed
1010 };
1011
// AllStatic holder enumerating the reasons the CMS generation was expanded.
1012 class CMSExpansionCause : public AllStatic {
1013  public:
1014   enum Cause {
1015     _no_expansion,                // no expansion has occurred
1016     _satisfy_free_ratio,          // expand to meet the free-space ratio
1017     _satisfy_promotion,           // expand to make room for promotions
1018     _satisfy_allocation,          // expand to satisfy an allocation request
1019     _allocate_par_lab,            // expand to allocate a parallel allocation buffer
1020     _allocate_par_spooling_space, // expand to allocate promotion spooling space
1021     _adaptive_size_policy         // expand at the request of the size policy
1022   };
1023   // Return a string describing the cause of the expansion.
1024   static const char* to_string(CMSExpansionCause::Cause cause);
1025 };
1026
1027 class ConcurrentMarkSweepGeneration: public CardGeneration {
1028 friend class VMStructs;
1029 friend class ConcurrentMarkSweepThread;
1280 // the space.
1281 FreeChunk* find_chunk_at_end();
1282
1283 void post_compact();
1284
1285 // Debugging
1286 void prepare_for_verify();
1287 void verify();
1288 void print_statistics() PRODUCT_RETURN;
1289
1290 // Performance Counters support
1291 virtual void update_counters();
1292 virtual void update_counters(size_t used);
1293 void initialize_performance_counters();
1294 CollectorCounters* counters() { return collector()->counters(); }
1295
1296   // Support for parallel remark of survivor space
1297   void* get_data_recorder(int thr_num) {
1298     // Delegate to the collector, which owns the per-thread recorders
1299     return collector()->get_data_recorder(thr_num);
1300   }
1301
1302   // Printing
1303   const char* name() const;
1304   virtual const char* short_name() const { return "CMS"; } // short tag used in GC logs
1305   void print() const;
1306   void printOccupancy(const char* s);
1307   bool must_be_youngest() const { return false; } // this generation is never the youngest
1308   bool must_be_oldest() const { return true; }    // ... and is always the oldest
1309
1310 // Resize the generation after a compacting GC. The
1311 // generation can be treated as a contiguous space
1312 // after the compaction.
1313 virtual void compute_new_size();
1314 // Resize the generation after a non-compacting
1315 // collection.
1316 void compute_new_size_free_list();
1317
1318 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1319 void rotate_debug_collection_type();
|
719 };
720
721 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
722 bool stop_world_and_do(CMS_op_type op);
723
724   OopTaskQueueSet* task_queues() { return _task_queues; } // accessor: the set of per-worker oop task queues
725   int* hash_seed(int i) { return &_hash_seed[i]; } // accessor: seed slot for worker i (address returned so callers can update it)
726   YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } // accessor: gang of concurrent worker threads
727
728   // Support for parallelizing Eden rescan in CMS remark phase
729   void sample_eden(); // ... sample Eden space top
730
731 private:
732 // Support for parallelizing young gen rescan in CMS remark phase
733 Generation* _young_gen; // the younger gen
734 HeapWord** _top_addr; // ... Top of Eden
735 HeapWord** _end_addr; // ... End of Eden
736 HeapWord** _eden_chunk_array; // ... Eden partitioning array
737 size_t _eden_chunk_index; // ... top (exclusive) of array
738 size_t _eden_chunk_capacity; // ... max entries in array
739 // This is meant to be a boolean flag, but jbyte for CAS.
740 jbyte _eden_chunk_sampling_active;
741
742 // Support for parallelizing survivor space rescan
743 HeapWord** _survivor_chunk_array;
744 size_t _survivor_chunk_index;
745 size_t _survivor_chunk_capacity;
746 size_t* _cursor;
747 ChunkArray* _survivor_plab_array;
748
749 // Support for marking stack overflow handling
750 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
751 bool par_take_from_overflow_list(size_t num,
752 OopTaskQueue* to_work_q,
753 int no_of_gc_threads);
754 void push_on_overflow_list(oop p);
755 void par_push_on_overflow_list(oop p);
756 // the following is, obviously, not, in general, "MT-stable"
757 bool overflow_list_is_empty() const;
758
759 void preserve_mark_if_necessary(oop p);
760 void par_preserve_mark_if_necessary(oop p);
915
916 // Adjust size of underlying generation
917 void compute_new_size();
918
919 // GC prologue and epilogue
920 void gc_prologue(bool full);
921 void gc_epilogue(bool full);
922
923   jlong time_of_last_gc(jlong now) { // report the last-GC time stamp, or 'now' while a collection is in progress
924     if (_collectorState <= Idling) {
925       // gc not in progress
926       return _time_of_last_gc;
927     } else {
928       // collection in progress
929       return now;
930     }
931   }
932
933 // Support for parallel remark of survivor space
934 void* get_data_recorder(int thr_num);
935 void sample_eden_chunk();
936
937 CMSBitMap* markBitMap() { return &_markBitMap; }
938 void directAllocated(HeapWord* start, size_t size);
939
940 // main CMS steps and related support
941 void checkpointRootsInitial(bool asynch);
942 bool markFromRoots(bool asynch); // a return value of false indicates failure
943 // due to stack overflow
944 void preclean();
945 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
946 bool init_mark_was_synchronous);
947 void sweep(bool asynch);
948
949 // Check that the currently executing thread is the expected
950 // one (foreground collector or background collector).
951 static void check_correct_thread_executing() PRODUCT_RETURN;
952 // XXXPERM void print_statistics() PRODUCT_RETURN;
953
954 bool is_cms_reachable(HeapWord* addr);
955
993
994 static void print_on_error(outputStream* st);
995
996 // debugging
997 void verify();
998 bool verify_after_remark(bool silent = VerifySilently);
999 void verify_ok_to_terminate() const PRODUCT_RETURN;
1000 void verify_work_stacks_empty() const PRODUCT_RETURN;
1001 void verify_overflow_empty() const PRODUCT_RETURN;
1002
1003 // convenience methods in support of debugging
1004 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1005 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1006
1007   // accessors
1008   CMSMarkStack* verification_mark_stack() { return &_markStack; } // mark stack reused for verification passes
1009   CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } // bitmap used by verify_after_remark()
1010
1011   // Initialization errors
1012   bool completed_initialization() { return _completed_initialization; } // true once collector initialization fully completed
1013
1014   void print_eden_and_survivor_chunk_arrays(); // print the eden/survivor chunk sampling arrays (debugging aid)
1015 };
1016
// AllStatic holder enumerating the reasons the CMS generation was expanded.
1017 class CMSExpansionCause : public AllStatic {
1018  public:
1019   enum Cause {
1020     _no_expansion,                // no expansion has occurred
1021     _satisfy_free_ratio,          // expand to meet the free-space ratio
1022     _satisfy_promotion,           // expand to make room for promotions
1023     _satisfy_allocation,          // expand to satisfy an allocation request
1024     _allocate_par_lab,            // expand to allocate a parallel allocation buffer
1025     _allocate_par_spooling_space, // expand to allocate promotion spooling space
1026     _adaptive_size_policy         // expand at the request of the size policy
1027   };
1028   // Return a string describing the cause of the expansion.
1029   static const char* to_string(CMSExpansionCause::Cause cause);
1030 };
1031
1032 class ConcurrentMarkSweepGeneration: public CardGeneration {
1033 friend class VMStructs;
1034 friend class ConcurrentMarkSweepThread;
1285 // the space.
1286 FreeChunk* find_chunk_at_end();
1287
1288 void post_compact();
1289
1290 // Debugging
1291 void prepare_for_verify();
1292 void verify();
1293 void print_statistics() PRODUCT_RETURN;
1294
1295 // Performance Counters support
1296 virtual void update_counters();
1297 virtual void update_counters(size_t used);
1298 void initialize_performance_counters();
1299 CollectorCounters* counters() { return collector()->counters(); }
1300
1301   // Support for parallel remark of survivor space
1302   void* get_data_recorder(int thr_num) {
1303     // Delegate to the collector, which owns the per-thread recorders
1304     return collector()->get_data_recorder(thr_num);
1305   }
1306   void sample_eden_chunk() {
1307     // Delegate to the collector (returning a void call is legal and keeps the delegation one line)
1308     return collector()->sample_eden_chunk();
1309   }
1310
1311   // Printing
1312   const char* name() const;
1313   virtual const char* short_name() const { return "CMS"; } // short tag used in GC logs
1314   void print() const;
1315   void printOccupancy(const char* s);
1316   bool must_be_youngest() const { return false; } // this generation is never the youngest
1317   bool must_be_oldest() const { return true; }    // ... and is always the oldest
1318
1319 // Resize the generation after a compacting GC. The
1320 // generation can be treated as a contiguous space
1321 // after the compaction.
1322 virtual void compute_new_size();
1323 // Resize the generation after a non-compacting
1324 // collection.
1325 void compute_new_size_free_list();
1326
1327 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1328 void rotate_debug_collection_type();
|