                                            CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
         "virtual adjust_object_size_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
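    // Scan forward through the compaction spaces until one can absorb the
    // object, either exactly or with room left over for a free chunk of at
    // least MinChunkSize.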
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual method was used above
      // to save the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");

// ...

  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
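  // Walk the blocks in the region, applying the closure to each block that
  // holds an object and noting whether the last one seen was an object array.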
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
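  // Unless the last object was an object array, record the address we
  // reached so that the next invocation can start past this point.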
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
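//
// A minimal sketch of a conforming closure, assuming the
// ObjectClosureCareful interface supplies a do_object_careful_m(oop,
// MemRegion) hook that returns the object's size in HeapWords (0 to
// terminate the iteration early). The class name and counting logic are
// illustrative only, and any other pure virtuals of the interface are
// omitted for brevity:
//
//   class CountObjectsCareful : public ObjectClosureCareful {
//     size_t _count;
//    public:
//     CountObjectsCareful() : _count(0) { }
//     size_t do_object_careful_m(oop p, MemRegion mr) {
//       _count++;           // visit the object
//       return p->size();   // must be exact: the caller advances to the
//                           // next block boundary with addr + size
//     }
//     size_t count() const { return _count; }
//   };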
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
                                                   ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it is not necessarily
  // the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();