23
24 #ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
25 #define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
26
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/objectStartArray.hpp"
29 #include "gc/parallel/parMarkBitMap.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/collectorCounters.hpp"
33 #include "gc/shared/taskqueue.hpp"
34 #include "oops/oop.hpp"
35
36 class ParallelScavengeHeap;
37 class PSAdaptiveSizePolicy;
38 class PSYoungGen;
39 class PSOldGen;
40 class ParCompactionManager;
41 class ParallelTaskTerminator;
42 class PSParallelCompact;
43 class GCTaskManager;
44 class GCTaskQueue;
45 class PreGCValues;
46 class MoveAndUpdateClosure;
47 class RefProcTaskExecutor;
48 class ParallelOldTracer;
49 class STWGCTimer;
50
51 // The SplitInfo class holds the information needed to 'split' a source region
52 // so that the live data can be copied to two destination *spaces*. Normally,
53 // all the live data in a region is copied to a single destination space (e.g.,
54 // everything live in a region in eden is copied entirely into the old gen).
55 // However, when the heap is nearly full, all the live data in eden may not fit
56 // into the old gen. Copying only some of the regions from eden to old gen
57 // requires finding a region that does not contain a partial object (i.e., no
58 // live object crosses the region boundary) somewhere near the last object that
59 // does fit into the old gen. Since it's not always possible to find such a
60 // region, splitting is necessary for predictable behavior.
61 //
62 // A region is always split at the end of the partial object. This avoids
63 // additional tests when calculating the new location of a pointer, which is a
|
23
24 #ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
25 #define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
26
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/objectStartArray.hpp"
29 #include "gc/parallel/parMarkBitMap.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/collectorCounters.hpp"
33 #include "gc/shared/taskqueue.hpp"
34 #include "oops/oop.hpp"
35
36 class ParallelScavengeHeap;
37 class PSAdaptiveSizePolicy;
38 class PSYoungGen;
39 class PSOldGen;
40 class ParCompactionManager;
41 class ParallelTaskTerminator;
42 class PSParallelCompact;
43 class PreGCValues;
44 class MoveAndUpdateClosure;
45 class RefProcTaskExecutor;
46 class ParallelOldTracer;
47 class STWGCTimer;
48
49 // The SplitInfo class holds the information needed to 'split' a source region
50 // so that the live data can be copied to two destination *spaces*. Normally,
51 // all the live data in a region is copied to a single destination space (e.g.,
52 // everything live in a region in eden is copied entirely into the old gen).
53 // However, when the heap is nearly full, all the live data in eden may not fit
54 // into the old gen. Copying only some of the regions from eden to old gen
55 // requires finding a region that does not contain a partial object (i.e., no
56 // live object crosses the region boundary) somewhere near the last object that
57 // does fit into the old gen. Since it's not always possible to find such a
58 // region, splitting is necessary for predictable behavior.
59 //
60 // A region is always split at the end of the partial object. This avoids
61 // additional tests when calculating the new location of a pointer, which is a
|
1099
1100 PSParallelCompact();
1101
1102 static void invoke(bool maximum_heap_compaction);
1103 static bool invoke_no_policy(bool maximum_heap_compaction);
1104
1105 static void post_initialize();
1106 // Perform initialization for PSParallelCompact that requires
1107 // allocations. This should be called during the VM initialization
1108 // at a point where it would be appropriate to return a JNI_ENOMEM
1109 // in the event of a failure.
1110 static bool initialize();
1111
1112 // Closure accessors
1113 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } // NOTE(review): C-style cast — prefer a named cast if _is_alive_closure's declared type allows it.
1114
1115 // Public accessors
1116 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1117 static unsigned int total_invocations() { return _total_invocations; }
1118 static CollectorCounters* counters() { return _counters; }
1119
1120 // Used to add tasks; returns the GCTaskManager (declared out of view).
1121 static GCTaskManager* const gc_task_manager();
1122
1123 // Marking support
1124 static inline bool mark_obj(oop obj);
1125 static inline bool is_marked(oop obj);
1126
1127 template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);
1128
1129 // Compaction support.
1130 // Return true if p is in the range [beg_addr, end_addr).
1131 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1132 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1133
1134 // Convenience wrappers for per-space data kept in _space_info.
1135 static inline MutableSpace* space(SpaceId space_id);
1136 static inline HeapWord* new_top(SpaceId space_id);
1137 static inline HeapWord* dense_prefix(SpaceId space_id);
1138 static inline ObjectStartArray* start_array(SpaceId space_id);
1139
1140 // Move and update the live objects in the specified space.
|
1097
1098 PSParallelCompact();
1099
1100 static void invoke(bool maximum_heap_compaction);
1101 static bool invoke_no_policy(bool maximum_heap_compaction);
1102
1103 static void post_initialize();
1104 // Perform initialization for PSParallelCompact that requires
1105 // allocations. This should be called during the VM initialization
1106 // at a point where it would be appropriate to return a JNI_ENOMEM
1107 // in the event of a failure.
1108 static bool initialize();
1109
1110 // Closure accessors
1111 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } // NOTE(review): C-style cast — prefer a named cast if _is_alive_closure's declared type allows it.
1112
1113 // Public accessors
1114 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1115 static unsigned int total_invocations() { return _total_invocations; }
1116 static CollectorCounters* counters() { return _counters; }
1117
1118 // Marking support
1119 static inline bool mark_obj(oop obj);
1120 static inline bool is_marked(oop obj);
1121
1122 template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);
1123
1124 // Compaction support.
1125 // Return true if p is in the range [beg_addr, end_addr).
1126 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1127 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1128
1129 // Convenience wrappers for per-space data kept in _space_info.
1130 static inline MutableSpace* space(SpaceId space_id);
1131 static inline HeapWord* new_top(SpaceId space_id);
1132 static inline HeapWord* dense_prefix(SpaceId space_id);
1133 static inline ObjectStartArray* start_array(SpaceId space_id);
1134
1135 // Move and update the live objects in the specified space.
|