12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
27
28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
30 #include "gc/g1/heapRegionSet.hpp"
31 #include "gc/shared/taskqueue.hpp"
32
33 class ConcurrentGCTimer;
34 class ConcurrentMarkThread;
35 class G1CollectedHeap;
36 class G1CMTask;
37 class G1ConcurrentMark;
38 class G1OldTracer;
39 class G1RegionToSpaceMapper;
40 class G1SurvivorRegions;
41
42 #ifdef _MSC_VER
43 #pragma warning(push)
44 // warning C4522: multiple assignment operators specified
45 #pragma warning(disable:4522)
46 #endif
47
48 // This is a container class for either an oop or a continuation address for
49 // mark stack entries. Both are pushed onto the mark stack.
50 class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
51 private:
// Either a raw oop, or a HeapWord* slice address with the low bit set (see
// the tagging constructor below).
52 void* _holder;
53
// Low-bit tag distinguishing an array-slice continuation address from an
// oop in _holder. Assumes oop pointers are at least 2-byte aligned so the
// bit is otherwise unused -- TODO confirm against the full header.
54 static const uintptr_t ArraySliceBit = 1;
55
// Private tagging constructors; callers use the from_oop()/from_slice()
// factory functions instead.
56 G1TaskQueueEntry(oop obj) : _holder(obj) {
57 assert(_holder != NULL, "Not allowed to set NULL task queue element");
58 }
// Stores the slice address with ArraySliceBit OR'ed in as the type tag.
59 G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
60 public:
61 G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
62 G1TaskQueueEntry() : _holder(NULL) { }
63
64 static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
65 static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
66
67 G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
68 _holder = t._holder;
69 return *this;
70 }
// NOTE(review): original lines 71-109 are missing from this capture. The
// declaration and closing brace below therefore most likely belong to a
// later class in the header (do_object_b suggests an is-alive closure),
// not to G1TaskQueueEntry -- confirm against the complete file.
110 bool do_object_b(oop obj);
111 };
112
113 // Represents the overflow mark stack used by concurrent marking.
114 //
115 // Stores oops in a huge buffer in virtual memory that is always fully committed.
116 // Resizing may only happen during a STW pause when the stack is empty.
117 //
118 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
119 // stack memory is split into evenly sized chunks of oops. Users can only
120 // add or remove entries on that basis.
121 // Chunks are filled in increasing address order. Not completely filled chunks
122 // have a NULL element as a terminating element.
123 //
124 // Every chunk has a header containing a single pointer element used for memory
125 // management. This wastes some space, but is negligible (< .1% with current sizing).
126 //
127 // Memory management is done using a mix of tracking a high water-mark indicating
128 // that all chunks at a lower address are valid chunks, and a singly linked free
129 // list connecting all empty chunks.
130 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
131 public:
132 // Number of TaskQueueEntries that can fit in a single chunk.
133 static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
134 private:
// A chunk: one intrusive next pointer plus EntriesPerChunk payload entries,
// sized so the whole struct holds 1024 pointer-sized elements.
135 struct TaskQueueEntryChunk {
136 TaskQueueEntryChunk* next;
137 G1TaskQueueEntry data[EntriesPerChunk];
138 };
139
140 size_t _max_chunk_capacity; // Maximum number of TaskQueueEntryChunk elements on the stack.
141
142 TaskQueueEntryChunk* _base; // Bottom address of allocated memory area.
143 size_t _chunk_capacity; // Current maximum number of TaskQueueEntryChunk elements.
144
// The _pad0/_pad1/_pad2 arrays are sized from DEFAULT_CACHE_LINE_SIZE to
// keep the volatile list heads below on separate cache lines (they are
// updated concurrently by different threads).
145 char _pad0[DEFAULT_CACHE_LINE_SIZE];
146 TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
147 char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
148 TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
149 volatile size_t _chunks_in_chunk_list;
150 char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// NOTE(review): original lines 151-209 (remaining fields and most member
// function declarations) are missing from this capture -- restore from the
// complete header before relying on this fragment.
210 void set_empty();
211
212 // Apply Fn to every oop on the mark stack. The mark stack must not
213 // be modified while iterating.
214 template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
215 };
216
217 // Root Regions are regions that are not empty at the beginning of a
218 // marking cycle and which we might collect during an evacuation pause
219 // while the cycle is active. Given that, during evacuation pauses, we
220 // do not copy objects that are explicitly marked, what we have to do
221 // for the root regions is to scan them and mark all objects reachable
222 // from them. According to the SATB assumptions, we only need to visit
223 // each object once during marking. So, as long as we finish this scan
224 // before the next evacuation pause, we can copy the objects from the
225 // root regions without having to mark them or do anything else to them.
226 //
227 // Currently, we only support root region scanning once (at the start
228 // of the marking cycle) and the root regions are all the survivor
229 // regions populated during the initial-mark pause.
230 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
231 private:
232 const G1SurvivorRegions* _survivors;
233 G1ConcurrentMark* _cm;
234
235 volatile bool _scan_in_progress;
// Set by abort() to make the iteration stop early (per the comment on
// abort() below).
236 volatile bool _should_abort;
// Presumably the index of the next survivor region to hand out during the
// claiming/scanning iteration -- the claiming code is not visible in this
// capture; verify against the complete header.
237 volatile int _claimed_survivor_index;
238
239 void notify_scan_done();
240
241 public:
242 G1CMRootRegions();
243 // We actually do most of the initialization in this method.
244 void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
245
246 // Reset the claiming / scanning of the root regions.
247 void prepare_for_scan();
248
249 // Forces get_next() to return NULL so that the iteration aborts early.
250 void abort() { _should_abort = true; }
// NOTE(review): the class is truncated here in this capture (no closing
// brace); the remaining member declarations must be restored from the
// complete header.
// ==== capture boundary: a second, later revision of this same header follows ====
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
27
28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
30 #include "gc/g1/heapRegionSet.hpp"
31 #include "gc/shared/taskqueue.hpp"
32 #include "memory/allocation.hpp"
33
34 class ConcurrentGCTimer;
35 class ConcurrentMarkThread;
36 class G1CollectedHeap;
37 class G1CMTask;
38 class G1ConcurrentMark;
39 class G1OldTracer;
40 class G1RegionToSpaceMapper;
41 class G1SurvivorRegions;
42
43 #ifdef _MSC_VER
44 #pragma warning(push)
45 // warning C4522: multiple assignment operators specified
46 #pragma warning(disable:4522)
47 #endif
48
// NOTE(review): this header appears twice in this capture; this second copy
// drops VALUE_OBJ_CLASS_SPEC from the class declarations (consistent with a
// later revision of the file). Deduplicate when restoring the source.
49 // This is a container class for either an oop or a continuation address for
50 // mark stack entries. Both are pushed onto the mark stack.
51 class G1TaskQueueEntry {
52 private:
// Either a raw oop, or a HeapWord* slice address with the low bit set (see
// the tagging constructor below).
53 void* _holder;
54
// Low-bit tag distinguishing an array-slice continuation address from an
// oop in _holder. Assumes oop pointers are at least 2-byte aligned so the
// bit is otherwise unused -- TODO confirm against the full header.
55 static const uintptr_t ArraySliceBit = 1;
56
// Private tagging constructors; callers use the from_oop()/from_slice()
// factory functions instead.
57 G1TaskQueueEntry(oop obj) : _holder(obj) {
58 assert(_holder != NULL, "Not allowed to set NULL task queue element");
59 }
// Stores the slice address with ArraySliceBit OR'ed in as the type tag.
60 G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
61 public:
62 G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
63 G1TaskQueueEntry() : _holder(NULL) { }
64
65 static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
66 static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
67
68 G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
69 _holder = t._holder;
70 return *this;
71 }
// NOTE(review): original lines 72-110 are missing from this capture. The
// declaration and closing brace below therefore most likely belong to a
// later class in the header (do_object_b suggests an is-alive closure),
// not to G1TaskQueueEntry -- confirm against the complete file.
111 bool do_object_b(oop obj);
112 };
113
114 // Represents the overflow mark stack used by concurrent marking.
115 //
116 // Stores oops in a huge buffer in virtual memory that is always fully committed.
117 // Resizing may only happen during a STW pause when the stack is empty.
118 //
119 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
120 // stack memory is split into evenly sized chunks of oops. Users can only
121 // add or remove entries on that basis.
122 // Chunks are filled in increasing address order. Not completely filled chunks
123 // have a NULL element as a terminating element.
124 //
125 // Every chunk has a header containing a single pointer element used for memory
126 // management. This wastes some space, but is negligible (< .1% with current sizing).
127 //
128 // Memory management is done using a mix of tracking a high water-mark indicating
129 // that all chunks at a lower address are valid chunks, and a singly linked free
130 // list connecting all empty chunks.
131 class G1CMMarkStack {
132 public:
133 // Number of TaskQueueEntries that can fit in a single chunk.
134 static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
135 private:
// A chunk: one intrusive next pointer plus EntriesPerChunk payload entries,
// sized so the whole struct holds 1024 pointer-sized elements.
136 struct TaskQueueEntryChunk {
137 TaskQueueEntryChunk* next;
138 G1TaskQueueEntry data[EntriesPerChunk];
139 };
140
141 size_t _max_chunk_capacity; // Maximum number of TaskQueueEntryChunk elements on the stack.
142
143 TaskQueueEntryChunk* _base; // Bottom address of allocated memory area.
144 size_t _chunk_capacity; // Current maximum number of TaskQueueEntryChunk elements.
145
// The _pad0/_pad1/_pad2 arrays are sized from DEFAULT_CACHE_LINE_SIZE to
// keep the volatile list heads below on separate cache lines (they are
// updated concurrently by different threads).
146 char _pad0[DEFAULT_CACHE_LINE_SIZE];
147 TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
148 char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
149 TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
150 volatile size_t _chunks_in_chunk_list;
151 char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// NOTE(review): original lines 152-210 (remaining fields and most member
// function declarations) are missing from this capture -- restore from the
// complete header before relying on this fragment.
211 void set_empty();
212
213 // Apply Fn to every oop on the mark stack. The mark stack must not
214 // be modified while iterating.
215 template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
216 };
217
218 // Root Regions are regions that are not empty at the beginning of a
219 // marking cycle and which we might collect during an evacuation pause
220 // while the cycle is active. Given that, during evacuation pauses, we
221 // do not copy objects that are explicitly marked, what we have to do
222 // for the root regions is to scan them and mark all objects reachable
223 // from them. According to the SATB assumptions, we only need to visit
224 // each object once during marking. So, as long as we finish this scan
225 // before the next evacuation pause, we can copy the objects from the
226 // root regions without having to mark them or do anything else to them.
227 //
228 // Currently, we only support root region scanning once (at the start
229 // of the marking cycle) and the root regions are all the survivor
230 // regions populated during the initial-mark pause.
231 class G1CMRootRegions {
232 private:
233 const G1SurvivorRegions* _survivors;
234 G1ConcurrentMark* _cm;
235
236 volatile bool _scan_in_progress;
// Set by abort() to make the iteration stop early (per the comment on
// abort() below).
237 volatile bool _should_abort;
// Presumably the index of the next survivor region to hand out during the
// claiming/scanning iteration -- the claiming code is not visible in this
// capture; verify against the complete header.
238 volatile int _claimed_survivor_index;
239
240 void notify_scan_done();
241
242 public:
243 G1CMRootRegions();
244 // We actually do most of the initialization in this method.
245 void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
246
247 // Reset the claiming / scanning of the root regions.
248 void prepare_for_scan();
249
250 // Forces get_next() to return NULL so that the iteration aborts early.
251 void abort() { _should_abort = true; }
// NOTE(review): the class is truncated here in this capture (no closing
// brace); the remaining member declarations must be restored from the
// complete header.
// ==== end of capture (header truncated; file continues in the original source) ====