        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not scan what we
// (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");

// ...
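
// Illustrative sketch, not part of the original file: a toy model of the
// "extend to the right, then trim back" step described in the comment above
// do_MemRegion(). Real code works on HeapWord* addresses within a space;
// here the heap is just a sorted vector of object start offsets with a
// final sentinel equal to the heap size, and toy_extend_and_trim() is a
// made-up name. Assumes 1 <= region_end <= heap size.
#include <algorithm>
#include <cstddef>
#include <vector>

static size_t toy_extend_and_trim(const std::vector<size_t>& obj_starts,
                                  size_t region_end,      // exclusive
                                  size_t scanned_limit) { // others' progress
  // Imprecise marking: the dirty region may end mid-object, so extend the
  // right end out to the end of the object containing the region's last word...
  std::vector<size_t>::const_iterator next =
      std::upper_bound(obj_starts.begin(), obj_starts.end(), region_end - 1);
  size_t extended_end = *next;  // next object's start == this object's end
  // ...but trim back so we never scan what we (or another worker thread)
  // have already scanned or claimed.
  return std::min(extended_end, scanned_limit);
}
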
bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

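// Illustrative sketch, not part of the original file: the two predicates
// above split a contiguous space at _top. For any address in [_bottom, _end),
// exactly one of is_in() / is_free_block() holds, which is what lets callers
// classify an arbitrary in-space address. ToySpace is a made-up stand-in.
struct ToySpace {
  char* _bottom;
  char* _top;  // allocation high-water mark: below is used, at/above is free
  char* _end;

  bool is_in(const void* p) const         { return _bottom <= p && p < _top; }
  bool is_free_block(const char* p) const { return p >= _top; }
};
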
void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
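
// Illustrative sketch, not part of the original file: why set_end() above
// resizes the offset table *before* publishing the new end. If the order
// were reversed, code that observed the larger space could consult a slot
// the side table does not cover yet. All names below are made up.
#include <cstddef>
#include <vector>

struct ToyOffsetTable {
  std::vector<unsigned char> entries;  // one entry per covered heap word
  void resize(size_t covered_words) { entries.resize(covered_words); }
};

struct ToyGrowableSpace {
  char*          _bottom;
  char*          _end;
  ToyOffsetTable _offsets;

  void set_end(char* new_end) {
    _offsets.resize(static_cast<size_t>(new_end - _bottom));  // 1. widen coverage
    _end = new_end;                                           // 2. then advertise
  }
};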

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}
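
// Illustrative sketch, not part of the original file: what the mangler calls
// above are for. In non-PRODUCT builds, the unused part of a space (above
// top()) is filled with a recognizable bit pattern, so code that wrongly
// reads or relies on memory above top() tends to trip an assert instead of
// limping along. The constant and helper names below are made up; HotSpot
// defines its own mangling values.
#include <cassert>
#include <stdint.h>

static const uintptr_t TOY_MANGLE_WORD = (uintptr_t)0xDEADBEEF;

static void toy_mangle_region(uintptr_t* from, uintptr_t* to) {
  for (uintptr_t* p = from; p < to; ++p) {
    *p = TOY_MANGLE_WORD;  // stamp every unused word with the pattern
  }
}

static void toy_check_mangled_region(const uintptr_t* from, const uintptr_t* to) {
  for (const uintptr_t* p = from; p < to; ++p) {
    // Anything that is not the pattern was written by code that should not
    // have touched memory above top().
    assert(*p == TOY_MANGLE_WORD && "unused area was overwritten");
  }
}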

// ...

    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

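// Illustrative sketch, not part of the original file: object_iterate_from()
// above depends on the space being parsable -- objects sit end to end and
// each reports its own size, so p += oop(p)->size() always lands on the
// next object's header. A toy version over size-prefixed records:
#include <cstddef>

struct ToyObj {
  size_t size_in_words;  // total size of this record, header included
  // payload words follow
};

static void toy_object_iterate(size_t* bottom, size_t* top,
                               void (*do_object)(ToyObj*)) {
  for (size_t* p = bottom; p < top; /* advanced below */) {
    ToyObj* obj = reinterpret_cast<ToyObj*>(p);
    do_object(obj);
    p += obj->size_in_words;  // the self-describing size is the iterator step
  }
}
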
HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
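
// Illustrative sketch, not part of the original file: the "careful" walk
// protocol used above. The closure returns an object's size to let the walk
// continue, or 0 to make it stop; the walk then reports where it stopped
// (NULL meaning it covered everything up to the safe limit). Names below
// are made up.
#include <cstddef>

typedef size_t (*toy_careful_fn)(size_t* obj);  // size in words, or 0 = stop

static size_t* toy_iterate_careful(size_t* bottom, size_t* limit,
                                   toy_careful_fn do_object_careful) {
  for (size_t* p = bottom; p < limit;) {
    size_t size = do_object_careful(p);
    if (size == 0) {
      return p;  // closure balked at this object; report the address
    }
    p += size;
  }
  return NULL;  // all done
}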