 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but that
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future some lock-free manner of keeping the
  // coordination will be devised.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}
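
// Note: block_start_const() is served entirely by the _offsets table that
// allocate() and par_allocate() keep up to date above, which is why every
// successful allocation must be followed by _offsets.alloc_block().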

size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _active(false), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
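      // Overwrite the dead range with a filler object and mark it, so the
      // later phases treat the whole range as a single live (but otherwise
      // unreferenced) object.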
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = oop(dead_start);
      obj->set_mark(obj->mark()->set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};
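
// scan_and_forward() below creates one DeadSpacer per space. When a run of dead
// objects starts exactly at the current compaction destination, the run can be
// turned into a live filler object, so the objects behind it keep their
// addresses and less copying is needed in the compaction pass.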

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the mark
  // word of each object. Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord* end_of_live = space->bottom(); // One byte beyond the last byte of the last live object.
  HeapWord* first_dead = NULL;             // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

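  // Walk the space from bottom to scan_limit, forwarding each live object and
  // collapsing each maximal run of dead objects into a single skip record
  // (a pointer to the next live object, stored in the run's first word).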
  while (cur_obj < scan_limit) {
    assert(!space->scanned_block_is_obj(cur_obj) ||
           oop(cur_obj)->mark()->is_marked() || oop(cur_obj)->mark()->is_unlocked() ||
           oop(cur_obj)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = oop(cur_obj);
        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
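
// Illustrative example: given a layout [A live][B dead][C dead][D live],
// scan_and_forward() forwards A, stores D's address in the first word of B,
// sets _first_dead = B, forwards D, and leaves _end_of_live just past D.
// The later phases can then jump from B directly to D without re-walking C.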

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
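    // Objects below first_dead form a dense prefix that will not move; their
    // mark words may have been reinitialized in the previous pass, so
    // is_gc_marked() is not a reliable liveness test there. The address
    // comparison below treats everything in the prefix as live.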
    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; its first word points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* prev_obj = NULL;

    while (cur_obj < space->_first_dead) {
      size_t size = space->obj_size(cur_obj);
      assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_obj = cur_obj;
      cur_obj += size;
    }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes. For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized. Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && !oop(space->bottom())->is_gc_marked()) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(space->bottom() < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(space->bottom()), p2i(end_of_live));
  HeapWord* cur_obj = space->bottom();
  if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!oop(cur_obj)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_obj = cur_obj);
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}
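
// Sketch of typical use (assumed wiring, not shown in this header): a space
// subtype instantiates the three phases with itself, e.g.
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     scan_and_forward(this, cp);
//   }
//
// with analogous one-line wrappers calling scan_and_adjust_pointers(this)
// and scan_and_compact(this).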

size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP