// Debug-only range check: asserts that 'addr' lies inside _covered_region.
// Stringizes the argument (#addr) so the failure message names the actual
// expression, and prints the region bounds for diagnosis.
#define assert_covered_region_contains(addr) \
assert(_covered_region.contains(addr), \
#addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]", \
p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end()))
126
127 void allocate_block(HeapWord* p) {
128 assert_covered_region_contains(p);
129 jbyte* block = block_for_addr(p);
130 HeapWord* block_base = addr_for_block(block);
131 size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
132 assert(offset < 128, "Sanity");
133 // When doing MT offsets, we can't assert this.
134 //assert(offset > *block, "Found backwards allocation");
135 *block = (jbyte)offset;
136 }
137
138 // Optimized for finding the first object that crosses into
139 // a given block. The blocks contain the offset of the last
140 // object in that block. Scroll backwards by one, and the first
141 // object hit should be at the beginning of the block
142 HeapWord* object_start(HeapWord* addr) const {
143 assert_covered_region_contains(addr);
144 jbyte* block = block_for_addr(addr);
145 HeapWord* scroll_forward = offset_addr_for_block(block--);
146 while (scroll_forward > addr) {
147 scroll_forward = offset_addr_for_block(block--);
148 }
149
150 HeapWord* next = scroll_forward;
151 while (next <= addr) {
152 scroll_forward = next;
153 next += oop(next)->size();
154 }
155 assert(scroll_forward <= addr, "wrong order for current and arg");
156 assert(addr <= next, "wrong order for arg and next");
157 return scroll_forward;
158 }
159
160 bool is_block_allocated(HeapWord* addr) {
161 assert_covered_region_contains(addr);
162 jbyte* block = block_for_addr(addr);
163 if (*block == clean_block)
164 return false;
165
166 return true;
167 }
168 #undef assert_covered_region_contains
169
170 // Return true if an object starts in the range of heap addresses.
171 // If an object starts at an address corresponding to
172 // "start", the method will return true.
173 bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
174 };
175
176 #endif // SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_HPP
|
// Debug-only range check: asserts that 'addr' lies inside _covered_region.
// Stringizes the argument (#addr) so the failure message names the actual
// expression, and prints the region bounds for diagnosis.
#define assert_covered_region_contains(addr) \
assert(_covered_region.contains(addr), \
#addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]", \
p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end()))
126
127 void allocate_block(HeapWord* p) {
128 assert_covered_region_contains(p);
129 jbyte* block = block_for_addr(p);
130 HeapWord* block_base = addr_for_block(block);
131 size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
132 assert(offset < 128, "Sanity");
133 // When doing MT offsets, we can't assert this.
134 //assert(offset > *block, "Found backwards allocation");
135 *block = (jbyte)offset;
136 }
137
138 // Optimized for finding the first object that crosses into
139 // a given block. The blocks contain the offset of the last
140 // object in that block. Scroll backwards by one, and the first
141 // object hit should be at the beginning of the block
142 HeapWord* object_start(HeapWord* addr) const;
143
144 bool is_block_allocated(HeapWord* addr) {
145 assert_covered_region_contains(addr);
146 jbyte* block = block_for_addr(addr);
147 if (*block == clean_block)
148 return false;
149
150 return true;
151 }
152
153 // Return true if an object starts in the range of heap addresses.
154 // If an object starts at an address corresponding to
155 // "start", the method will return true.
156 bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
157 };
158
159 #endif // SHARE_VM_GC_PARALLEL_OBJECTSTARTARRAY_HPP
|