/*
 * NOTE(review): extraction-garbled region. Many original source lines (each
 * still carrying its own line number, e.g. "104", "105") have been flattened
 * onto a handful of physical lines, so the embedded "//" comments now swallow
 * the code that follows them on the same physical line; this text must be
 * re-split at the embedded line numbers before it can compile. Code below is
 * left byte-identical; only comments are added.
 *
 * This region is the LEFT column of what appears to be a two-column diff
 * (a '|' separator appears further down in the file). It contains:
 *  - the tail of a mark-compact "forward" pass (its signature lies before
 *    this view, so the function is incomplete here): computes
 *    allowed_deadspace from allowed_dead_ratio(), forwards marked objects
 *    via cp->space->forward(...), and records each dead gap as a LiveRange
 *    whose start is overlaid on the dead object's mark word (per the
 *    embedded comments at lines 162-163);
 *  - all of CompactibleSpace::scan_and_adjust_pointers(SpaceType*)
 *    ("Used by MarkSweep::mark_sweep_phase3()"): for dead objects the mark
 *    word encodes a pointer to the next live object
 *    (mark()->decode_pointer());
 *  - the start of CompactibleSpace::scan_and_compact(SpaceType*)
 *    ("Used by MarkSweep::mark_sweep_phase4()"), truncated at this block's
 *    end.
 */
/* embedded lines 104-157: forward-pass tail -- dead-space budget, main
 * scan loop forwarding live objects and coalescing runs of dead ones. */
104 size_t allowed_deadspace = 0; 105 if (skip_dead) { 106 const size_t ratio = space->allowed_dead_ratio(); 107 allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize; 108 } 109 110 HeapWord* q = space->bottom(); 111 HeapWord* t = space->scan_limit(); 112 113 HeapWord* end_of_live= q; // One byte beyond the last byte of the last 114 // live object. 115 HeapWord* first_dead = space->end(); // The first dead object. 116 LiveRange* liveRange = NULL; // The current live range, recorded in the 117 // first header of preceding free area. 118 space->_first_dead = first_dead; 119 120 const intx interval = PrefetchScanIntervalInBytes; 121 122 while (q < t) { 123 assert(!space->scanned_block_is_obj(q) || 124 oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || 125 oop(q)->mark()->has_bias_pattern(), 126 "these are the only valid states during a mark sweep"); 127 if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) { 128 // prefetch beyond q 129 Prefetch::write(q, interval); 130 size_t size = space->scanned_block_size(q); 131 compact_top = cp->space->forward(oop(q), size, cp, compact_top); 132 q += size; 133 end_of_live = q; 134 } else { 135 // run over all the contiguous dead objects 136 HeapWord* end = q; 137 do { 138 // prefetch beyond end 139 Prefetch::write(end, interval); 140 end += space->scanned_block_size(end); 141 } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked())); 142 143 // see if we might want to pretend this object is alive so that 144 // we don't have to compact quite as often. 145 if (allowed_deadspace > 0 && q == compact_top) { 146 size_t sz = pointer_delta(end, q); 147 if (space->insert_deadspace(allowed_deadspace, q, sz)) { 148 compact_top = cp->space->forward(oop(q), sz, cp, compact_top); 149 q = end; 150 end_of_live = end; 151 continue; 152 } 153 } 154 155 // otherwise, it really is a free region. 156 157 // for the previous LiveRange, record the end of the live objects. 
/* embedded lines 158-213: LiveRange bookkeeping and epilogue of the forward
 * pass (records _end_of_live/_first_dead, saves compaction_top), then the
 * opening of scan_and_adjust_pointers through the first dense-prefix loop. */
158 if (liveRange) { 159 liveRange->set_end(q); 160 } 161 162 // record the current LiveRange object. 163 // liveRange->start() is overlaid on the mark word. 164 liveRange = (LiveRange*)q; 165 liveRange->set_start(end); 166 liveRange->set_end(end); 167 168 // see if this is the first dead region. 169 if (q < first_dead) { 170 first_dead = q; 171 } 172 173 // move on to the next object 174 q = end; 175 } 176 } 177 178 assert(q == t, "just checking"); 179 if (liveRange != NULL) { 180 liveRange->set_end(q); 181 } 182 space->_end_of_live = end_of_live; 183 if (end_of_live < first_dead) { 184 first_dead = end_of_live; 185 } 186 space->_first_dead = first_dead; 187 188 // save the compaction_top of the compaction space. 189 cp->space->set_compaction_top(compact_top); 190 } 191 192 template <class SpaceType> 193 inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) { 194 // adjust all the interior pointers to point at the new locations of objects 195 // Used by MarkSweep::mark_sweep_phase3() 196 197 HeapWord* q = space->bottom(); 198 HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction". 199 200 assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?"); 201 202 if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) { 203 // we have a chunk of the space which hasn't moved and we've 204 // reinitialized the mark word during the previous pass, so we can't 205 // use is_gc_marked for the traversal. 206 HeapWord* end = space->_first_dead; 207 208 while (q < end) { 209 // I originally tried to conjoin "block_start(q) == q" to the 210 // assertion below, but that doesn't work, because you can't 211 // accurately traverse previous objects to get to the current one 212 // after their pointers have been 213 // updated, until the actual compaction is done. 
/* embedded lines 214-270: main adjust loop (dead objects' mark word decodes
 * to the next live object), then the opening of scan_and_compact. The stray
 * "dld, 4/00" is the tail of the preceding embedded comment, split here by
 * the extraction. */
dld, 4/00 214 assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs"); 215 216 // point all the oops to the new location 217 size_t size = MarkSweep::adjust_pointers(oop(q)); 218 size = space->adjust_obj_size(size); 219 220 q += size; 221 } 222 223 if (space->_first_dead == t) { 224 q = t; 225 } else { 226 // $$$ This is funky. Using this to read the previously written 227 // LiveRange. See also use below. 228 q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer(); 229 } 230 } 231 232 const intx interval = PrefetchScanIntervalInBytes; 233 234 debug_only(HeapWord* prev_q = NULL); 235 while (q < t) { 236 // prefetch beyond q 237 Prefetch::write(q, interval); 238 if (oop(q)->is_gc_marked()) { 239 // q is alive 240 // point all the oops to the new location 241 size_t size = MarkSweep::adjust_pointers(oop(q)); 242 size = space->adjust_obj_size(size); 243 debug_only(prev_q = q); 244 q += size; 245 } else { 246 // q is not a live object, so its mark should point at the next 247 // live object 248 debug_only(prev_q = q); 249 q = (HeapWord*) oop(q)->mark()->decode_pointer(); 250 assert(q > prev_q, "we should be moving forward through memory"); 251 } 252 } 253 254 assert(q == t, "just checking"); 255 } 256 257 template <class SpaceType> 258 inline void CompactibleSpace::scan_and_compact(SpaceType* space) { 259 // Copy all live objects to their new location 260 // Used by MarkSweep::mark_sweep_phase4() 261 262 HeapWord* q = space->bottom(); 263 HeapWord* const t = space->_end_of_live; 264 debug_only(HeapWord* prev_q = NULL); 265 266 if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) { 267 #ifdef ASSERT // Debug only 268 // we have a chunk of the space which hasn't moved and we've reinitialized 269 // the mark word during the previous pass, so we can't use is_gc_marked for 270 // the traversal. 
/* embedded lines 271-327: scan_and_compact body -- dense-prefix debug walk,
 * then the copy loop: live objects are moved to oop(q)->forwardee() with
 * Copy::aligned_conjoint_words and their mark re-initialized; dead objects
 * are skipped via mark()->decode_pointer(). */
271 HeapWord* const end = space->_first_dead; 272 273 while (q < end) { 274 size_t size = space->obj_size(q); 275 assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); 276 prev_q = q; 277 q += size; 278 } 279 #endif 280 281 if (space->_first_dead == t) { 282 q = t; 283 } else { 284 // $$$ Funky 285 q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer(); 286 } 287 } 288 289 const intx scan_interval = PrefetchScanIntervalInBytes; 290 const intx copy_interval = PrefetchCopyIntervalInBytes; 291 while (q < t) { 292 if (!oop(q)->is_gc_marked()) { 293 // mark is pointer to next marked oop 294 debug_only(prev_q = q); 295 q = (HeapWord*) oop(q)->mark()->decode_pointer(); 296 assert(q > prev_q, "we should be moving forward through memory"); 297 } else { 298 // prefetch beyond q 299 Prefetch::read(q, scan_interval); 300 301 // size and destination 302 size_t size = space->obj_size(q); 303 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); 304 305 // prefetch beyond compaction_top 306 Prefetch::write(compaction_top, copy_interval); 307 308 // copy object and reinit its mark 309 assert(q != compaction_top, "everything in this pass should be moving"); 310 Copy::aligned_conjoint_words(q, compaction_top, size); 311 oop(compaction_top)->init_mark(); 312 assert(oop(compaction_top)->klass() != NULL, "should have a class"); 313 314 debug_only(prev_q = q); 315 q += size; 316 } 317 } 318 319 // Let's remember if we were empty before we did the compaction. 320 bool was_empty = space->used_region().is_empty(); 321 // Reset space after compaction is complete 322 space->reset_after_compaction(); 323 // We do this clear, below, since it has overloaded meanings for some 324 // space subtypes. For example, OffsetTableContigSpace's that were 325 // compacted into will have had their offset table thresholds updated 326 // continuously, but those that weren't need to have their thresholds 327 // re-initialized. 
/*
 * NOTE(review): this single physical line spans three things:
 *  (1) the truncated epilogue of the LEFT column's scan_and_compact
 *      (embedded lines 328-330, cut off at "} else {");
 *  (2) a '|' column separator -- evidence that this file is a flattened
 *      two-column (side-by-side) diff, not a single translation unit;
 *  (3) the head of the RIGHT column (embedded lines 104-146), which repeats
 *      the same forward pass but reads oops through space->make_oop(q)
 *      instead of oop(q), and whose assert accepts one extra state:
 *      oopDesc::bs()->read_barrier(space->make_oop(q)) != space->make_oop(q),
 *      i.e. an object whose barrier-resolved copy differs from the raw
 *      pointer. That pattern looks like a forwarding/read-barrier GC change
 *      (Shenandoah/Brooks-pointer style) -- TODO confirm against VCS history.
 * Code is left byte-identical; only this comment is added.
 */
Also mangles unused area for debugging. 328 if (space->used_region().is_empty()) { 329 if (!was_empty) space->clear(SpaceDecorator::Mangle); 330 } else { | 104 size_t allowed_deadspace = 0; 105 if (skip_dead) { 106 const size_t ratio = space->allowed_dead_ratio(); 107 allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize; 108 } 109 110 HeapWord* q = space->bottom(); 111 HeapWord* t = space->scan_limit(); 112 113 HeapWord* end_of_live= q; // One byte beyond the last byte of the last 114 // live object. 115 HeapWord* first_dead = space->end(); // The first dead object. 116 LiveRange* liveRange = NULL; // The current live range, recorded in the 117 // first header of preceding free area. 118 space->_first_dead = first_dead; 119 120 const intx interval = PrefetchScanIntervalInBytes; 121 122 while (q < t) { 123 assert(!space->scanned_block_is_obj(q) || 124 space->make_oop(q)->mark()->is_marked() || 125 space->make_oop(q)->mark()->is_unlocked() || 126 space->make_oop(q)->mark()->has_bias_pattern() || 127 oopDesc::bs()->read_barrier(space->make_oop(q)) != space->make_oop(q), 128 "these are the only valid states during a mark sweep"); 129 if (space->scanned_block_is_obj(q) && space->make_oop(q)->is_gc_marked()) { 130 // prefetch beyond q 131 Prefetch::write(q, interval); 132 size_t size = space->scanned_block_size(q); 133 compact_top = cp->space->forward(space->make_oop(q), size, cp, compact_top); 134 q += size; 135 end_of_live = q; 136 } else { 137 // run over all the contiguous dead objects 138 HeapWord* end = q; 139 do { 140 // prefetch beyond end 141 Prefetch::write(end, interval); 142 end += space->scanned_block_size(end); 143 } while (end < t && (!space->scanned_block_is_obj(end) || !space->make_oop(end)->is_gc_marked())); 144 145 // see if we might want to pretend this object is alive so that 146 // we don't have to compact quite as often. 
/*
 * NOTE(review): RIGHT column of the flattened two-column diff, embedded
 * lines 147-332 (the file ends truncated at "} else { |", so the final
 * function is incomplete here). It mirrors the left column's three
 * functions with these visible differences:
 *  - every raw oop(q)/oop(end) read is replaced by space->make_oop(q)
 *    (also used for the LiveRange cast at embedded line 166 and as the
 *    copy SOURCE of Copy::aligned_conjoint_words at embedded line 312);
 *  - reads of space->_first_dead (embedded lines 230, 287) still use plain
 *    oop(...) -- presumably intentional since that word holds a LiveRange /
 *    decode_pointer() value rather than a real object header; verify.
 * As above, the embedded "//" comments make this text non-compilable until
 * re-split at the embedded line numbers. Code left byte-identical; only
 * comments added.
 */
/* embedded lines 147-207: deadspace insertion, LiveRange recording, forward
 * epilogue, and the opening of scan_and_adjust_pointers (right column). */
147 if (allowed_deadspace > 0 && q == compact_top) { 148 size_t sz = pointer_delta(end, q); 149 if (space->insert_deadspace(allowed_deadspace, q, sz)) { 150 compact_top = cp->space->forward(space->make_oop(q), sz, cp, compact_top); 151 q = end; 152 end_of_live = end; 153 continue; 154 } 155 } 156 157 // otherwise, it really is a free region. 158 159 // for the previous LiveRange, record the end of the live objects. 160 if (liveRange) { 161 liveRange->set_end(q); 162 } 163 164 // record the current LiveRange object. 165 // liveRange->start() is overlaid on the mark word. 166 liveRange = (LiveRange*) (HeapWord*) space->make_oop(q); 167 liveRange->set_start(end); 168 liveRange->set_end(end); 169 170 // see if this is the first dead region. 171 if (q < first_dead) { 172 first_dead = q; 173 } 174 175 // move on to the next object 176 q = end; 177 } 178 } 179 180 assert(q == t, "just checking"); 181 if (liveRange != NULL) { 182 liveRange->set_end(q); 183 } 184 space->_end_of_live = end_of_live; 185 if (end_of_live < first_dead) { 186 first_dead = end_of_live; 187 } 188 space->_first_dead = first_dead; 189 190 // save the compaction_top of the compaction space. 191 cp->space->set_compaction_top(compact_top); 192 } 193 194 template <class SpaceType> 195 inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) { 196 // adjust all the interior pointers to point at the new locations of objects 197 // Used by MarkSweep::mark_sweep_phase3() 198 199 HeapWord* q = space->bottom(); 200 HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction". 201 202 assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?"); 203 204 if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) { 205 // we have a chunk of the space which hasn't moved and we've 206 // reinitialized the mark word during the previous pass, so we can't 207 // use is_gc_marked for the traversal. 
/* embedded lines 208-267: adjust loop (right column) and the opening of
 * scan_and_compact (right column). */
208 HeapWord* end = space->_first_dead; 209 210 while (q < end) { 211 // I originally tried to conjoin "block_start(q) == q" to the 212 // assertion below, but that doesn't work, because you can't 213 // accurately traverse previous objects to get to the current one 214 // after their pointers have been 215 // updated, until the actual compaction is done. dld, 4/00 216 assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs"); 217 218 // point all the oops to the new location 219 size_t size = MarkSweep::adjust_pointers(space->make_oop(q)); 220 size = space->adjust_obj_size(size); 221 222 q += size; 223 } 224 225 if (space->_first_dead == t) { 226 q = t; 227 } else { 228 // $$$ This is funky. Using this to read the previously written 229 // LiveRange. See also use below. 230 q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer(); 231 } 232 } 233 234 const intx interval = PrefetchScanIntervalInBytes; 235 236 debug_only(HeapWord* prev_q = NULL); 237 while (q < t) { 238 // prefetch beyond q 239 Prefetch::write(q, interval); 240 if (space->make_oop(q)->is_gc_marked()) { 241 // q is alive 242 // point all the oops to the new location 243 size_t size = MarkSweep::adjust_pointers(space->make_oop(q)); 244 size = space->adjust_obj_size(size); 245 debug_only(prev_q = q); 246 q += size; 247 } else { 248 // q is not a live object, so its mark should point at the next 249 // live object 250 debug_only(prev_q = q); 251 q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer(); 252 assert(q > prev_q, "we should be moving forward through memory"); 253 } 254 } 255 256 assert(q == t, "just checking"); 257 } 258 259 template <class SpaceType> 260 inline void CompactibleSpace::scan_and_compact(SpaceType* space) { 261 // Copy all live objects to their new location 262 // Used by MarkSweep::mark_sweep_phase4() 263 264 HeapWord* q = space->bottom(); 265 HeapWord* const t = space->_end_of_live; 266 debug_only(HeapWord* prev_q = NULL); 267 
/* embedded lines 268-321: scan_and_compact copy loop (right column); note
 * the copy source is the barrier-resolved (HeapWord*) space->make_oop(q),
 * while the destination comes from forwardee(). */
268 if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) { 269 #ifdef ASSERT // Debug only 270 // we have a chunk of the space which hasn't moved and we've reinitialized 271 // the mark word during the previous pass, so we can't use is_gc_marked for 272 // the traversal. 273 HeapWord* const end = space->_first_dead; 274 275 while (q < end) { 276 size_t size = space->obj_size(q); 277 assert(!space->make_oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); 278 prev_q = q; 279 q += size; 280 } 281 #endif 282 283 if (space->_first_dead == t) { 284 q = t; 285 } else { 286 // $$$ Funky 287 q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer(); 288 } 289 } 290 291 const intx scan_interval = PrefetchScanIntervalInBytes; 292 const intx copy_interval = PrefetchCopyIntervalInBytes; 293 while (q < t) { 294 if (!space->make_oop(q)->is_gc_marked()) { 295 // mark is pointer to next marked oop 296 debug_only(prev_q = q); 297 q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer(); 298 assert(q > prev_q, "we should be moving forward through memory"); 299 } else { 300 // prefetch beyond q 301 Prefetch::read(q, scan_interval); 302 303 // size and destination 304 size_t size = space->obj_size(q); 305 HeapWord* compaction_top = (HeapWord*)space->make_oop(q)->forwardee(); 306 307 // prefetch beyond compaction_top 308 Prefetch::write(compaction_top, copy_interval); 309 310 // copy object and reinit its mark 311 assert(q != compaction_top, "everything in this pass should be moving"); 312 Copy::aligned_conjoint_words((HeapWord*) space->make_oop(q), compaction_top, size); 313 oop(compaction_top)->init_mark(); 314 assert(oop(compaction_top)->klass() != NULL, "should have a class"); 315 316 debug_only(prev_q = q); 317 q += size; 318 } 319 } 320 321 // Let's remember if we were empty before we did the compaction. 
/* embedded lines 322-332: epilogue -- reset the space and conditionally
 * clear/mangle; truncated at "} else {" followed by the trailing '|'
 * column separator. */
322 bool was_empty = space->used_region().is_empty(); 323 // Reset space after compaction is complete 324 space->reset_after_compaction(); 325 // We do this clear, below, since it has overloaded meanings for some 326 // space subtypes. For example, OffsetTableContigSpace's that were 327 // compacted into will have had their offset table thresholds updated 328 // continuously, but those that weren't need to have their thresholds 329 // re-initialized. Also mangles unused area for debugging. 330 if (space->used_region().is_empty()) { 331 if (!was_empty) space->clear(SpaceDecorator::Mangle); 332 } else { |