149 cp->threshold = cp->space->initialize_threshold();
150 cp->space->set_compaction_top(cp->space->bottom());
151 }
152
// Forwarding (scan) phase of mark-compact: walk every block in
// [space->bottom(), space->scan_limit()) and compute a new address for each
// live object via cp->space->forward(), advancing compact_top as we go.
153 HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
154
// Helper that may decide to treat a run of dead space as if it were live,
// so that later objects need not be slid over it (see insert_deadspace below).
155 DeadSpacer dead_spacer(space);
156
157 HeapWord* end_of_live = space->bottom(); // One byte beyond the last byte of the last live object.
158 HeapWord* first_dead = NULL; // The first dead object.
159
// Prefetch distance (in bytes) used while scanning forward through the space.
160 const intx interval = PrefetchScanIntervalInBytes;
161
162 HeapWord* cur_obj = space->bottom();
163 HeapWord* scan_limit = space->scan_limit();
164
165 while (cur_obj < scan_limit) {
// Sanity check: a scanned block that is an object must have its mark word in
// one of the states legal during mark-sweep: marked, unlocked, or biased.
166 assert(!space->scanned_block_is_obj(cur_obj) ||
167 oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
168 oop(cur_obj)->mark_raw()->has_bias_pattern(),
169 "these are the only valid states during a mark sweep");
170 if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
// Live object: record its forwarding address and extend the live prefix.
171 // prefetch beyond cur_obj
172 Prefetch::write(cur_obj, interval);
173 size_t size = space->scanned_block_size(cur_obj);
174 compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
175 cur_obj += size;
176 end_of_live = cur_obj;
177 } else {
// Dead (or non-object) block: coalesce the entire contiguous dead run
// [cur_obj, end) in one pass so it can be handled as a single unit.
178 // run over all the contiguous dead objects
179 HeapWord* end = cur_obj;
180 do {
181 // prefetch beyond end
182 Prefetch::write(end, interval);
183 end += space->scanned_block_size(end);
184 } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
185
186 // see if we might want to pretend this object is alive so that
187 // we don't have to compact quite as often.
188 if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
// The dead run starts exactly at the current compaction point; the
// DeadSpacer accepted it, so it is kept in place as filler rather than
// having subsequent live objects slide over it.
189 oop obj = oop(cur_obj);
|
149 cp->threshold = cp->space->initialize_threshold();
150 cp->space->set_compaction_top(cp->space->bottom());
151 }
152
// Forwarding (scan) phase of mark-compact: walk every block in
// [space->bottom(), space->scan_limit()) and compute a new address for each
// live object via cp->space->forward(), advancing compact_top as we go.
153 HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
154
// Helper that may decide to treat a run of dead space as if it were live,
// so that later objects need not be slid over it (see insert_deadspace below).
155 DeadSpacer dead_spacer(space);
156
157 HeapWord* end_of_live = space->bottom(); // One byte beyond the last byte of the last live object.
158 HeapWord* first_dead = NULL; // The first dead object.
159
// Prefetch distance (in bytes) used while scanning forward through the space.
160 const intx interval = PrefetchScanIntervalInBytes;
161
162 HeapWord* cur_obj = space->bottom();
163 HeapWord* scan_limit = space->scan_limit();
164
165 while (cur_obj < scan_limit) {
// Sanity check: a scanned block that is an object must have its mark word in
// one of the states legal during mark-sweep: marked, unlocked, or biased.
166 assert(!space->scanned_block_is_obj(cur_obj) ||
167 oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
168 oop(cur_obj)->mark_raw()->has_bias_pattern(),
// NOTE(review): HotSpot convention for pointers in assert messages is
// PTR_FORMAT with p2i(cur_obj), not "%p" — confirm this passes HotSpot's
// printf-format checking and produces consistent output across platforms.
169 "these are the only valid states during a mark sweep (%p)", cur_obj);
170 if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
// Live object: record its forwarding address and extend the live prefix.
171 // prefetch beyond cur_obj
172 Prefetch::write(cur_obj, interval);
173 size_t size = space->scanned_block_size(cur_obj);
174 compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
175 cur_obj += size;
176 end_of_live = cur_obj;
177 } else {
// Dead (or non-object) block: coalesce the entire contiguous dead run
// [cur_obj, end) in one pass so it can be handled as a single unit.
178 // run over all the contiguous dead objects
179 HeapWord* end = cur_obj;
180 do {
181 // prefetch beyond end
182 Prefetch::write(end, interval);
183 end += space->scanned_block_size(end);
184 } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
185
186 // see if we might want to pretend this object is alive so that
187 // we don't have to compact quite as often.
188 if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
// The dead run starts exactly at the current compaction point; the
// DeadSpacer accepted it, so it is kept in place as filler rather than
// having subsequent live objects slide over it.
189 oop obj = oop(cur_obj);
|