164 top <= _min_done),
165 "overlap!");
166
167 // Walk the region if it is not empty; otherwise there is nothing to do.
168 if (!extended_mr.is_empty()) {
169 walk_mem_region(extended_mr, bottom_obj, top);
170 }
171
172 // An idempotent closure might be applied in any order, so we don't
173 // record a _min_done for it.
174 if (!_cl->idempotent()) {
175 _min_done = bottom;
176 } else {
177 assert(_min_done == _last_explicit_min_done,
178 "Don't update _min_done for idempotent cl");
179 }
180 }
181
182 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
183 CardTableModRefBS::PrecisionStyle precision,
184 HeapWord* boundary) {
185 return new DirtyCardToOopClosure(this, cl, precision, boundary);
186 }
187
188 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
189 HeapWord* top_obj) {
190 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
191 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
192 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
193 // An arrayOop is starting on the dirty card - since we do exact
194 // store checks for objArrays we are done.
195 } else {
196 // Otherwise, it is possible that the object starting on the dirty
197 // card spans the entire card, and that the store happened on a
198 // later card. Figure out where the object ends.
199 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
200 "Block size and object size mismatch");
201 top = top_obj + oop(top_obj)->size();
202 }
203 }
204 } else {
243 /* Bottom lies entirely below top, so we can call the */ \
244 /* non-memRegion version of oop_iterate below. */ \
245 oop(bottom)->oop_iterate(cl); \
246 bottom = next_obj; \
247 next_obj = bottom + oop(bottom)->size(); \
248 } \
249 /* Last object. */ \
250 oop(bottom)->oop_iterate(cl, mr); \
251 } \
252 }
253
// Expand ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN once per
// closure type that walk_mem_region is dispatched with in this file.
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
259
260 DirtyCardToOopClosure*
261 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
262 CardTableModRefBS::PrecisionStyle precision,
263 HeapWord* boundary) {
264 return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
265 }
266
267 void Space::initialize(MemRegion mr,
268 bool clear_space,
269 bool mangle_space) {
270 HeapWord* bottom = mr.start();
271 HeapWord* end = mr.end();
272 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
273 "invalid space boundaries");
274 set_bottom(bottom);
275 set_end(end);
276 if (clear_space) clear(mangle_space);
277 }
278
279 void Space::clear(bool mangle_space) {
280 if (ZapUnusedHeapArea && mangle_space) {
281 mangle_unused_area();
282 }
283 }
|
164 top <= _min_done),
165 "overlap!");
166
167 // Walk the region if it is not empty; otherwise there is nothing to do.
168 if (!extended_mr.is_empty()) {
169 walk_mem_region(extended_mr, bottom_obj, top);
170 }
171
172 // An idempotent closure might be applied in any order, so we don't
173 // record a _min_done for it.
174 if (!_cl->idempotent()) {
175 _min_done = bottom;
176 } else {
177 assert(_min_done == _last_explicit_min_done,
178 "Don't update _min_done for idempotent cl");
179 }
180 }
181
182 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
183 CardTableModRefBS::PrecisionStyle precision,
184 HeapWord* boundary,
185 bool parallel) {
186 return new DirtyCardToOopClosure(this, cl, precision, boundary);
187 }
188
189 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
190 HeapWord* top_obj) {
191 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
192 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
193 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
194 // An arrayOop is starting on the dirty card - since we do exact
195 // store checks for objArrays we are done.
196 } else {
197 // Otherwise, it is possible that the object starting on the dirty
198 // card spans the entire card, and that the store happened on a
199 // later card. Figure out where the object ends.
200 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
201 "Block size and object size mismatch");
202 top = top_obj + oop(top_obj)->size();
203 }
204 }
205 } else {
244 /* Bottom lies entirely below top, so we can call the */ \
245 /* non-memRegion version of oop_iterate below. */ \
246 oop(bottom)->oop_iterate(cl); \
247 bottom = next_obj; \
248 next_obj = bottom + oop(bottom)->size(); \
249 } \
250 /* Last object. */ \
251 oop(bottom)->oop_iterate(cl, mr); \
252 } \
253 }
254
// Expand ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN once per
// closure type that walk_mem_region is dispatched with in this file.
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
260
261 DirtyCardToOopClosure*
262 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
263 CardTableModRefBS::PrecisionStyle precision,
264 HeapWord* boundary,
265 bool parallel) {
266 return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
267 }
268
269 void Space::initialize(MemRegion mr,
270 bool clear_space,
271 bool mangle_space) {
272 HeapWord* bottom = mr.start();
273 HeapWord* end = mr.end();
274 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
275 "invalid space boundaries");
276 set_bottom(bottom);
277 set_end(end);
278 if (clear_space) clear(mangle_space);
279 }
280
281 void Space::clear(bool mangle_space) {
282 if (ZapUnusedHeapArea && mangle_space) {
283 mangle_unused_area();
284 }
285 }
|