13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
25 #define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
26
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/objectStartArray.hpp"
29 #include "gc/parallel/parMarkBitMap.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/collectorCounters.hpp"
33 #include "oops/oop.hpp"
34
35 class ParallelScavengeHeap;
36 class PSAdaptiveSizePolicy;
37 class PSYoungGen;
38 class PSOldGen;
39 class ParCompactionManager;
40 class ParallelTaskTerminator;
41 class PSParallelCompact;
42 class GCTaskManager;
43 class GCTaskQueue;
44 class PreGCValues;
45 class MoveAndUpdateClosure;
46 class RefProcTaskExecutor;
47 class ParallelOldTracer;
48 class STWGCTimer;
49
50 // The SplitInfo class holds the information needed to 'split' a source region
51 // so that the live data can be copied to two destination *spaces*. Normally,
52 // all the live data in a region is copied to a single destination space (e.g.,
53 // everything live in a region in eden is copied entirely into the old gen).
54 // However, when the heap is nearly full, all the live data in eden may not fit
55 // into the old gen. Copying only some of the regions from eden to old gen
56 // requires finding a region that does not contain a partial object (i.e., no
57 // live object crosses the region boundary) somewhere near the last object that
58 // does fit into the old gen. Since it's not always possible to find such a
59 // region, splitting is necessary for predictable behavior.
60 //
61 // A region is always split at the end of the partial object. This avoids
62 // additional tests when calculating the new location of a pointer, which is a
|
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
25 #define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
26
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/objectStartArray.hpp"
29 #include "gc/parallel/parMarkBitMap.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/collectorCounters.hpp"
33 #include "gc/shared/taskqueue.hpp"
34 #include "oops/oop.hpp"
35
36 class ParallelScavengeHeap;
37 class PSAdaptiveSizePolicy;
38 class PSYoungGen;
39 class PSOldGen;
40 class ParCompactionManager;
41 class ParallelTaskTerminator;
42 class PSParallelCompact;
43 class PreGCValues;
44 class MoveAndUpdateClosure;
45 class RefProcTaskExecutor;
46 class ParallelOldTracer;
47 class STWGCTimer;
48
49 // The SplitInfo class holds the information needed to 'split' a source region
50 // so that the live data can be copied to two destination *spaces*. Normally,
51 // all the live data in a region is copied to a single destination space (e.g.,
52 // everything live in a region in eden is copied entirely into the old gen).
53 // However, when the heap is nearly full, all the live data in eden may not fit
54 // into the old gen. Copying only some of the regions from eden to old gen
55 // requires finding a region that does not contain a partial object (i.e., no
56 // live object crosses the region boundary) somewhere near the last object that
57 // does fit into the old gen. Since it's not always possible to find such a
58 // region, splitting is necessary for predictable behavior.
59 //
60 // A region is always split at the end of the partial object. This avoids
61 // additional tests when calculating the new location of a pointer, which is a
|
896 // references in the object.
897 //
898 // A current exception is that objects that cross a region boundary are moved
899 // but do not have their references updated. References are not updated because
900 // it cannot easily be determined if the klass pointer KKK for the object AAA
901 // has been updated. KKK likely resides in a region to the left of the region
902 // containing AAA. These AAAs have their references updated at the end in a
903 // clean up phase. See the method PSParallelCompact::update_deferred_objects().
904 // An alternate strategy is being investigated for this deferral of updating.
905 //
906 // Compaction is done on a region basis. A region that is ready to be filled is
907 // put on a ready list and GC threads take regions off the list and fill them. A
908 // region is ready to be filled if it is empty of live objects. Such a region may
909 // have been initially empty (only contained dead objects) or may have had all
910 // its live objects copied out already. A region that compacts into itself is
911 // also ready for filling. The ready list is initially filled with empty
912 // regions and regions compacting into themselves. There is always at least 1
913 // region that can be put on the ready list. The regions are atomically added
914 // and removed from the ready list.
915
916 class PSParallelCompact : AllStatic {
917 public:
918 // Convenient access to type names.
919 typedef ParMarkBitMap::idx_t idx_t;
920 typedef ParallelCompactData::RegionData RegionData;
921 typedef ParallelCompactData::BlockData BlockData;
922
// Identifiers for the spaces operated on during a full collection.
// last_space_id is a sentinel whose value equals the number of real
// space ids; it is not itself a space.
923 typedef enum {
924 old_space_id, eden_space_id,
925 from_space_id, to_space_id, last_space_id
926 } SpaceId;
927
928 public:
929 // Inline closure decls
930 //
931 class IsAliveClosure: public BoolObjectClosure {
932 public:
// Returns whether object p is live; presumably consults the mark
// bitmap built during the marking phase -- body is defined elsewhere,
// confirm against the .cpp file.
933 virtual bool do_object_b(oop p);
934 };
935
936 friend class RefProcTaskProxy;
937 friend class PSParallelCompactTest;
938
939 private:
940 static STWGCTimer _gc_timer;
941 static ParallelOldTracer _gc_tracer;
942 static elapsedTimer _accumulated_time;
943 static unsigned int _total_invocations;
944 static unsigned int _maximum_compaction_gc_num;
945 static jlong _time_of_last_gc; // ms
946 static CollectorCounters* _counters;
|
895 // references in the object.
896 //
897 // A current exception is that objects that cross a region boundary are moved
898 // but do not have their references updated. References are not updated because
899 // it cannot easily be determined if the klass pointer KKK for the object AAA
900 // has been updated. KKK likely resides in a region to the left of the region
901 // containing AAA. These AAAs have their references updated at the end in a
902 // clean up phase. See the method PSParallelCompact::update_deferred_objects().
903 // An alternate strategy is being investigated for this deferral of updating.
904 //
905 // Compaction is done on a region basis. A region that is ready to be filled is
906 // put on a ready list and GC threads take regions off the list and fill them. A
907 // region is ready to be filled if it is empty of live objects. Such a region may
908 // have been initially empty (only contained dead objects) or may have had all
909 // its live objects copied out already. A region that compacts into itself is
910 // also ready for filling. The ready list is initially filled with empty
911 // regions and regions compacting into themselves. There is always at least 1
912 // region that can be put on the ready list. The regions are atomically added
913 // and removed from the ready list.
914
915 class TaskQueue;
916
917 class PSParallelCompact : AllStatic {
918 public:
919 // Convenient access to type names.
920 typedef ParMarkBitMap::idx_t idx_t;
921 typedef ParallelCompactData::RegionData RegionData;
922 typedef ParallelCompactData::BlockData BlockData;
923
// Identifiers for the spaces operated on during a full collection.
// last_space_id is a sentinel whose value equals the number of real
// space ids; it is not itself a space.
924 typedef enum {
925 old_space_id, eden_space_id,
926 from_space_id, to_space_id, last_space_id
927 } SpaceId;
928
// A work item describing a run of regions in the dense prefix of one
// space whose objects need their references updated; instances are
// queued by enqueue_dense_prefix_tasks() and drained by GC worker
// threads.  Plain data holder -- no behavior of its own.
929 struct UpdateDensePrefixTask : public CHeapObj<mtGC> {
// Space containing the regions.
930 SpaceId _space_id;
// First region index of the run.
931 size_t _region_index_start;
// End region index of the run.
// NOTE(review): cannot tell from this header whether the end index is
// inclusive or exclusive -- confirm against the consumer of the queue.
932 size_t _region_index_end;
933
// Default constructor (empty run of space 0); presumably required so
// instances can be stored by value in the task queue -- confirm.
934 UpdateDensePrefixTask()
935 : _space_id(SpaceId(0)),
936 _region_index_start(0),
937 _region_index_end(0) {
938 }
939
// Construct a task covering regions [region_index_start,
// region_index_end) -- see the inclusivity note on _region_index_end.
940 UpdateDensePrefixTask(SpaceId space_id,
941 size_t region_index_start,
942 size_t region_index_end)
943 : _space_id(space_id),
944 _region_index_start(region_index_start),
945 _region_index_end(region_index_end) {
946 }
947 };
948
949 public:
950 // Inline closure decls
951 //
952 class IsAliveClosure: public BoolObjectClosure {
953 public:
// Returns whether object p is live; presumably consults the mark
// bitmap built during the marking phase -- body is defined elsewhere,
// confirm against the .cpp file.
954 virtual bool do_object_b(oop p);
955 };
956
957 friend class RefProcTaskProxy;
958 friend class PSParallelCompactTest;
959
960 private:
961 static STWGCTimer _gc_timer;
962 static ParallelOldTracer _gc_tracer;
963 static elapsedTimer _accumulated_time;
964 static unsigned int _total_invocations;
965 static unsigned int _maximum_compaction_gc_num;
966 static jlong _time_of_last_gc; // ms
967 static CollectorCounters* _counters;
|
1033
1034 // Summary phase utility routine to fill dead space (if any) at the dense
1035 // prefix boundary. Should only be called if the dense prefix is
1036 // non-empty.
1037 static void fill_dense_prefix_end(SpaceId id);
1038
1039 static void summarize_spaces_quick();
1040 static void summarize_space(SpaceId id, bool maximum_compaction);
1041 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
1042
1043 // Adjust addresses in roots. Does not adjust addresses in heap.
1044 static void adjust_roots(ParCompactionManager* cm);
1045
1046 DEBUG_ONLY(static void write_block_fill_histogram();)
1047
1048 // Move objects to new locations.
1049 static void compact_perm(ParCompactionManager* cm);
1050 static void compact();
1051
1052 // Add available regions to the stack and draining tasks to the task queue.
1053 static void prepare_region_draining_tasks(GCTaskQueue* q,
1054 uint parallel_gc_threads);
1055
1056 // Add dense prefix update tasks to the task queue.
1057 static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
1058 uint parallel_gc_threads);
1059
1060 // Add region stealing tasks to the task queue.
1061 static void enqueue_region_stealing_tasks(
1062 GCTaskQueue* q,
1063 ParallelTaskTerminator* terminator_ptr,
1064 uint parallel_gc_threads);
1065
1066 // If objects are left in eden after a collection, try to move the boundary
1067 // and absorb them into the old gen. Returns true if eden was emptied.
1068 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1069 PSYoungGen* young_gen,
1070 PSOldGen* old_gen);
1071
1072 // Reset time since last full gc
1073 static void reset_millis_since_last_gc();
1074
1075 #ifndef PRODUCT
1076 // Print generic summary data
1077 static void print_generic_summary_data(ParallelCompactData& summary_data,
1078 HeapWord* const beg_addr,
1079 HeapWord* const end_addr);
1080 #endif // #ifndef PRODUCT
1081
1082 public:
1083
1084 PSParallelCompact();
1085
1086 static void invoke(bool maximum_heap_compaction);
1087 static bool invoke_no_policy(bool maximum_heap_compaction);
1088
1089 static void post_initialize();
1090 // Perform initialization for PSParallelCompact that requires
1091 // allocations. This should be called during the VM initialization
1092 // at a point where it would be appropriate to return a JNI_ENOMEM
1093 // in the event of a failure.
1094 static bool initialize();
1095
1096 // Closure accessors
1097 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
1098
1099 // Public accessors
1100 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1101 static unsigned int total_invocations() { return _total_invocations; }
1102 static CollectorCounters* counters() { return _counters; }
1103
1104 // Used to add tasks
1105 static GCTaskManager* const gc_task_manager();
1106
1107 // Marking support
1108 static inline bool mark_obj(oop obj);
1109 static inline bool is_marked(oop obj);
1110
1111 template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);
1112
1113 // Compaction support.
1114 // Return true if p is in the range [beg_addr, end_addr).
1115 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1116 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1117
1118 // Convenience wrappers for per-space data kept in _space_info.
1119 static inline MutableSpace* space(SpaceId space_id);
1120 static inline HeapWord* new_top(SpaceId space_id);
1121 static inline HeapWord* dense_prefix(SpaceId space_id);
1122 static inline ObjectStartArray* start_array(SpaceId space_id);
1123
1124 // Move and update the live objects in the specified space.
|
1054
1055 // Summary phase utility routine to fill dead space (if any) at the dense
1056 // prefix boundary. Should only be called if the dense prefix is
1057 // non-empty.
1058 static void fill_dense_prefix_end(SpaceId id);
1059
1060 static void summarize_spaces_quick();
1061 static void summarize_space(SpaceId id, bool maximum_compaction);
1062 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
1063
1064 // Adjust addresses in roots. Does not adjust addresses in heap.
1065 static void adjust_roots(ParCompactionManager* cm);
1066
1067 DEBUG_ONLY(static void write_block_fill_histogram();)
1068
1069 // Move objects to new locations.
1070 static void compact_perm(ParCompactionManager* cm);
1071 static void compact();
1072
1073 // Add available regions to the stack and draining tasks to the task queue.
1074 static void prepare_region_draining_tasks(uint parallel_gc_threads);
1075
1076 // Add dense prefix update tasks to the task queue.
1077 static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
1078 uint parallel_gc_threads);
1079
1080 // If objects are left in eden after a collection, try to move the boundary
1081 // and absorb them into the old gen. Returns true if eden was emptied.
1082 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1083 PSYoungGen* young_gen,
1084 PSOldGen* old_gen);
1085
1086 // Reset time since last full gc
1087 static void reset_millis_since_last_gc();
1088
1089 #ifndef PRODUCT
1090 // Print generic summary data
1091 static void print_generic_summary_data(ParallelCompactData& summary_data,
1092 HeapWord* const beg_addr,
1093 HeapWord* const end_addr);
1094 #endif // #ifndef PRODUCT
1095
1096 public:
1097
1098 PSParallelCompact();
1099
1100 static void invoke(bool maximum_heap_compaction);
1101 static bool invoke_no_policy(bool maximum_heap_compaction);
1102
1103 static void post_initialize();
1104 // Perform initialization for PSParallelCompact that requires
1105 // allocations. This should be called during the VM initialization
1106 // at a point where it would be appropriate to return a JNI_ENOMEM
1107 // in the event of a failure.
1108 static bool initialize();
1109
1110 // Closure accessors
1111 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
1112
1113 // Public accessors
1114 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1115 static unsigned int total_invocations() { return _total_invocations; }
1116 static CollectorCounters* counters() { return _counters; }
1117
1118 // Marking support
1119 static inline bool mark_obj(oop obj);
1120 static inline bool is_marked(oop obj);
1121
1122 template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);
1123
1124 // Compaction support.
1125 // Return true if p is in the range [beg_addr, end_addr).
1126 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1127 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1128
1129 // Convenience wrappers for per-space data kept in _space_info.
1130 static inline MutableSpace* space(SpaceId space_id);
1131 static inline HeapWord* new_top(SpaceId space_id);
1132 static inline HeapWord* dense_prefix(SpaceId space_id);
1133 static inline ObjectStartArray* start_array(SpaceId space_id);
1134
1135 // Move and update the live objects in the specified space.
|