#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

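// Returns the address of the first marked word in the range [addr, limit),
// or limit if no marked word is found. addr is first rounded up to a
// possible object boundary; a NULL limit means the end of the covered heap.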
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

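// Returns the address of the first unmarked word in the range [addr, limit),
// or limit if every word in the range is marked. As above, a NULL limit
// means the end of the covered heap.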
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

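// Converts a distance in heap words into the corresponding distance in
// bitmap bits. The distance must be a multiple of the mapping granularity.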
int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

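// Each bitmap bit covers MinObjAlignmentInBytes of heap, so one bitmap byte
// maps mark_distance() bytes of heap and the bitmap needs
// heap_size / mark_distance() bytes of backing storage.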
size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

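// Hooks the bitmap up to its backing storage. The storage is committed and
// uncommitted region-by-region, so a mapping changed listener is installed
// to clear freshly committed parts of the bitmap.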
void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) :
      HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { }

  virtual bool doHeapRegion(HeapRegion* r) {
    // ... clearing of the bitmap range covering region r, with periodic
    // yield checks (and early abort) when _may_yield is set, elided ...

    return false;
  }
};

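// Gang task that clears the (next) mark bitmap in parallel, handing out
// regions to the workers via a HeapRegionClaimer.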
class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure* cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

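// Clears the whole bitmap using all active workers. This variant never
// yields, so the iteration is guaranteed to run to completion.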
void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
}

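// Sets all bits corresponding to [mr.start(), mr.end()). The range is first
// clipped to the heap area covered by the bitmap and must not be empty.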
void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not bitmap-granularity aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

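// Finds the first maximal marked range within [addr, end_addr), clears its
// bits, and returns it. Returns an empty MemRegion if there is no marked
// word in the range.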
MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

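// Reserves and commits the backing store for a mark stack of the given
// capacity (in oop-sized entries). Returns false if either the reservation
// or the backing store initialization fails.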
bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");