11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
26 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
27
28 #include "gc/serial/markSweep.inline.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/generation.hpp"
31 #include "gc/shared/liveRange.hpp"
32 #include "gc/shared/space.hpp"
33 #include "gc/shared/spaceDecorator.hpp"
34 #include "memory/universe.hpp"
35 #include "runtime/prefetch.inline.hpp"
36 #include "runtime/safepoint.hpp"
37
38 inline HeapWord* Space::block_start(const void* p) {
39 return block_start_const(p);
40 }
41
42 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
43 HeapWord* res = ContiguousSpace::allocate(size);
44 if (res != NULL) {
45 _offsets.alloc_block(res, size);
46 }
47 return res;
48 }
49
50 // Because of the requirement of keeping "_offsets" up to date with the
51 // allocations, we sequentialize these with a lock. Therefore, best if
100
  // NOTE(review): this chunk begins mid-function. The enclosing signature
  // (which declares 'space', 'cp' and 'compact_top') is in lines elided from
  // this listing — presumably CompactibleSpace::scan_and_forward; confirm
  // against the full file.

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  // skip_dead is false on every MarkSweepAlwaysCompactCount-th invocation,
  // which forces a full compaction (no dead space is tolerated).
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    // Budget of dead words we may leave un-compacted: allowed_dead_ratio()
    // percent of the space's capacity, converted from bytes to words.
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }

  HeapWord* q = space->bottom();
  HeapWord* t = space->scan_limit();

  HeapWord* end_of_live= q; // One byte beyond the last byte of the last
                            // live object.
  HeapWord* first_dead = space->end(); // The first dead object.
  LiveRange* liveRange = NULL; // The current live range, recorded in the
                               // first header of preceding free area.
  space->_first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  // Single pass over [bottom, scan_limit): forward live objects and thread
  // LiveRange records through the dead gaps.
  while (q < t) {
    assert(!space->scanned_block_is_obj(q) ||
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
      // prefetch beyond q
      Prefetch::write(q, interval);
      size_t size = space->scanned_block_size(q);
      // forward() computes the object's post-compaction address in the
      // compaction space and advances compact_top accordingly.
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
      q += size;
      end_of_live = q;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = q;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
          // The dead run was turned into a pseudo-live filler object;
          // forward it in place and keep scanning past it.
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
          q = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // for the previous LiveRange, record the end of the live objects.
      if (liveRange) {
        liveRange->set_end(q);
      }

      // record the current LiveRange object.
      // liveRange->start() is overlaid on the mark word.
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      // see if this is the first dead region.
      if (q < first_dead) {
        first_dead = q;
      }

      // move on to the next object
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    // Close out the final live range with the scan limit.
    liveRange->set_end(q);
  }
  space->_end_of_live = end_of_live;
  // Clamp: first_dead must never exceed end_of_live.
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  space->_first_dead = first_dead;

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
195
// Phase-3 pass: walk the space and rewrite every interior oop to the
// forwarding address computed by scan_and_forward.
template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space->bottom();
  HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction".

  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");

  // NOTE(review): lines are elided from this listing here. Brace counting
  // shows a conditional opens in the missing region and is closed by the
  // otherwise-unmatched '}' below — confirm against the full file.

    HeapWord* end = space->_first_dead;

    // Objects below _first_dead have not moved; adjust their pointers
    // without consulting mark bits.
    while (q < end) {
      // I originally tried to conjoin "block_start(q) == q" to the
      // assertion below, but that doesn't work, because you can't
      // accurately traverse previous objects to get to the current one
      // after their pointers have been
      // updated, until the actual compaction is done. dld, 4/00
      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");

      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);

      q += size;
    }

    if (space->_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky. Using this to read the previously written
      // LiveRange. See also use below.
      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  // From here on, mark bits distinguish live objects from dead gaps whose
  // mark words encode the address of the next live object.
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
260
261 template <class SpaceType>
262 inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
263 // Copy all live objects to their new location
264 // Used by MarkSweep::mark_sweep_phase4()
265
266 HeapWord* q = space->bottom();
267 HeapWord* const t = space->_end_of_live;
268 debug_only(HeapWord* prev_q = NULL);
269
270 if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
271 #ifdef ASSERT // Debug only
272 // we have a chunk of the space which hasn't moved and we've reinitialized
273 // the mark word during the previous pass, so we can't use is_gc_marked for
274 // the traversal.
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
26 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
27
28 #include "gc/serial/markSweep.inline.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/generation.hpp"
31 #include "gc/shared/space.hpp"
32 #include "gc/shared/spaceDecorator.hpp"
33 #include "memory/universe.hpp"
34 #include "runtime/prefetch.inline.hpp"
35 #include "runtime/safepoint.hpp"
36
37 inline HeapWord* Space::block_start(const void* p) {
38 return block_start_const(p);
39 }
40
41 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
42 HeapWord* res = ContiguousSpace::allocate(size);
43 if (res != NULL) {
44 _offsets.alloc_block(res, size);
45 }
46 return res;
47 }
48
49 // Because of the requirement of keeping "_offsets" up to date with the
50 // allocations, we sequentialize these with a lock. Therefore, best if
99
  // NOTE(review): this chunk begins mid-function. The enclosing signature
  // (which declares 'space', 'cp' and 'compact_top') is in lines elided from
  // this listing — presumably CompactibleSpace::scan_and_forward; confirm
  // against the full file.

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  // skip_dead is false on every MarkSweepAlwaysCompactCount-th invocation,
  // which forces a full compaction (no dead space is tolerated).
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    // Budget of dead words we may leave un-compacted: allowed_dead_ratio()
    // percent of the space's capacity, converted from bytes to words.
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }

  HeapWord* q = space->bottom();
  HeapWord* t = space->scan_limit();

  HeapWord* end_of_live= q; // One byte beyond the last byte of the last
                            // live object.
  HeapWord* first_dead = space->end(); // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  // Single pass over [bottom, scan_limit): forward live objects; for each
  // dead run, store a pointer to the next live object in its first word.
  while (q < t) {
    assert(!space->scanned_block_is_obj(q) ||
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
      // prefetch beyond q
      Prefetch::write(q, interval);
      size_t size = space->scanned_block_size(q);
      // forward() computes the object's post-compaction address in the
      // compaction space and advances compact_top accordingly.
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
      q += size;
      end_of_live = q;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = q;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
          // The dead run was turned into a pseudo-live filler object;
          // forward it in place and keep scanning past it.
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
          q = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
      (*(HeapWord**)q) = end;

      // see if this is the first dead region.
      if (q < first_dead) {
        first_dead = q;
      }

      // move on to the next object
      q = end;
    }
  }

  assert(q == t, "just checking");
  space->_end_of_live = end_of_live;
  // Clamp: first_dead must never exceed end_of_live.
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  space->_first_dead = first_dead;

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
180
// Phase-3 pass: walk the space and rewrite every interior oop to the
// forwarding address computed by scan_and_forward.
template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space->bottom();
  HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction".

  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");

  // NOTE(review): lines are elided from this listing here. Brace counting
  // shows a conditional opens in the missing region and is closed by the
  // otherwise-unmatched '}' below — confirm against the full file.

    HeapWord* end = space->_first_dead;

    // Objects below _first_dead have not moved; adjust their pointers
    // without consulting mark bits.
    while (q < end) {
      // I originally tried to conjoin "block_start(q) == q" to the
      // assertion below, but that doesn't work, because you can't
      // accurately traverse previous objects to get to the current one
      // after their pointers have been
      // updated, until the actual compaction is done. dld, 4/00
      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");

      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);

      q += size;
    }

    if (space->_first_dead == t) {
      q = t;
    } else {
      // The first dead object should contain a pointer to the first live object
      q = *((HeapWord**)(space->_first_dead));
    }
  }

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  // From here on, mark bits distinguish live objects from dead gaps whose
  // first word holds the address of the next live object.
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);
      debug_only(prev_q = q);
      q += size;
    } else {
      debug_only(prev_q = q);
      // q is not a live object, instead it points at the next live object
      q = *(HeapWord**)q;
      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
    }
  }

  assert(q == t, "just checking");
}
243
244 template <class SpaceType>
245 inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
246 // Copy all live objects to their new location
247 // Used by MarkSweep::mark_sweep_phase4()
248
249 HeapWord* q = space->bottom();
250 HeapWord* const t = space->_end_of_live;
251 debug_only(HeapWord* prev_q = NULL);
252
253 if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
254 #ifdef ASSERT // Debug only
255 // we have a chunk of the space which hasn't moved and we've reinitialized
256 // the mark word during the previous pass, so we can't use is_gc_marked for
257 // the traversal.
|