< prev index next >

src/share/vm/gc/shared/space.inline.hpp

Print this page




 235       size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
 236       size = space->adjust_obj_size(size);
 237       debug_only(prev_obj = cur_obj);
 238       cur_obj += size;
 239     } else {
 240       debug_only(prev_obj = cur_obj);
 241       // cur_obj is not a live object, instead it points at the next live object
 242       cur_obj = *(HeapWord**)cur_obj;
 243       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
 244     }
 245   }
 246 
 247   assert(cur_obj == end_of_live, "just checking");
 248 }
 249 
 250 template <class SpaceType>
 251 inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
 252   // Copy all live objects to their new location
 253   // Used by MarkSweep::mark_sweep_phase4()
       // Walks the space from bottom() to _end_of_live, copying each marked
       // (live) object down to the destination recorded in its forwarding
       // pointer (forwardee()) and reinitializing its mark word.
 254 
       // q is the scan cursor; t bounds the scan at the end of live data.
 255   HeapWord*       q = space->bottom();
 256   HeapWord* const t = space->_end_of_live;
 257   debug_only(HeapWord* prev_q = NULL);
 258 
       // Dense-prefix fast path: if the first object is unmarked yet
       // _first_dead lies beyond it, the leading run of objects did not move
       // and their mark words were already reinitialized, so skip past it.
 259   if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
 260     #ifdef ASSERT // Debug only
 261       // we have a chunk of the space which hasn't moved and we've reinitialized
 262       // the mark word during the previous pass, so we can't use is_gc_marked for
 263       // the traversal.
 264       HeapWord* const end = space->_first_dead;
 265 
 266       while (q < end) {
 267         size_t size = space->obj_size(q);
 268         assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
 269         prev_q = q;
 270         q += size;
 271       }
 272     #endif
 273 
 274     if (space->_first_dead == t) {
           // No dead object before end-of-live: the whole space is the
           // dense prefix; nothing left to copy.
 275       q = t;
 276     } else {
 277       // $$$ Funky
           // The mark word of the object at _first_dead appears to encode the
           // address of the next live object (cf. decode_pointer() in the loop
           // below) -- NOTE(review): confirm against the earlier mark phases.
 278       q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
 279     }
 280   }
 281 
 282   const intx scan_interval = PrefetchScanIntervalInBytes;
 283   const intx copy_interval = PrefetchCopyIntervalInBytes;
 284   while (q < t) {
 285     if (!oop(q)->is_gc_marked()) {
 286       // mark is pointer to next marked oop
 287       debug_only(prev_q = q);
 288       q = (HeapWord*) oop(q)->mark()->decode_pointer();
 289       assert(q > prev_q, "we should be moving forward through memory");
 290     } else {
 291       // prefetch beyond q
 292       Prefetch::read(q, scan_interval);
 293 
 294       // size and destination
 295       size_t size = space->obj_size(q);
 296       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
 297 
 298       // prefetch beyond compaction_top
 299       Prefetch::write(compaction_top, copy_interval);
 300 
 301       // copy object and reinit its mark
           // Source and destination may overlap, hence the conjoint copy.
 302       assert(q != compaction_top, "everything in this pass should be moving");
 303       Copy::aligned_conjoint_words(q, compaction_top, size);
 304       oop(compaction_top)->init_mark();
 305       assert(oop(compaction_top)->klass() != NULL, "should have a class");
 306 
 307       debug_only(prev_q = q);
 308       q += size;
 309     }
 310   }
 311 
 312   // Let's remember if we were empty before we did the compaction.
 313   bool was_empty = space->used_region().is_empty();
 314   // Reset space after compaction is complete
 315   space->reset_after_compaction();
 316   // We do this clear, below, since it has overloaded meanings for some
 317   // space subtypes.  For example, OffsetTableContigSpace's that were
 318   // compacted into will have had their offset table thresholds updated
 319   // continuously, but those that weren't need to have their thresholds
 320   // re-initialized.  Also mangles unused area for debugging.
 321   if (space->used_region().is_empty()) {
 322     if (!was_empty) space->clear(SpaceDecorator::Mangle);
 323   } else {
 324     if (ZapUnusedHeapArea) space->mangle_unused_area();
 325   }
 326 }
 327 
 328 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {


 235       size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
 236       size = space->adjust_obj_size(size);
 237       debug_only(prev_obj = cur_obj);
 238       cur_obj += size;
 239     } else {
 240       debug_only(prev_obj = cur_obj);
 241       // cur_obj is not a live object, instead it points at the next live object
 242       cur_obj = *(HeapWord**)cur_obj;
 243       assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
 244     }
 245   }
 246 
 247   assert(cur_obj == end_of_live, "just checking");
 248 }
 249 
 250 template <class SpaceType>
 251 inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
 252   // Copy all live objects to their new location
 253   // Used by MarkSweep::mark_sweep_phase4()
       // Walks the space from bottom() to _end_of_live, copying each marked
       // (live) object down to the destination recorded in its forwarding
       // pointer (forwardee()) and reinitializing its mark word.
 254 
       // cur_obj is the scan cursor; end_of_live bounds the scan.
 255   HeapWord*       cur_obj = space->bottom();
 256   HeapWord* const end_of_live = space->_end_of_live;
 257   debug_only(HeapWord* prev_obj = NULL);
 258 
       // Dense-prefix fast path: if the first object is unmarked yet
       // _first_dead lies beyond it, the leading run of objects did not move
       // and their mark words were already reinitialized, so skip past it.
 259   if (cur_obj < end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
 260     #ifdef ASSERT // Debug only
 261       // we have a chunk of the space which hasn't moved and we've reinitialized
 262       // the mark word during the previous pass, so we can't use is_gc_marked for
 263       // the traversal.
 264       HeapWord* const end = space->_first_dead;
 265 
 266       while (cur_obj < end) {
 267         size_t size = space->obj_size(cur_obj);
 268         assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
 269         prev_obj = cur_obj;
 270         cur_obj += size;
 271       }
 272     #endif
 273 
 274     if (space->_first_dead == end_of_live) {
           // No dead object before end-of-live: the whole space is the
           // dense prefix; nothing left to copy.
 275       cur_obj = end_of_live;
 276     } else {
 277       // $$$ Funky
           // The mark word of the object at _first_dead appears to encode the
           // address of the next live object (cf. decode_pointer() in the loop
           // below) -- NOTE(review): confirm against the earlier mark phases.
 278       cur_obj = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
 279     }
 280   }
 281 
 282   const intx scan_interval = PrefetchScanIntervalInBytes;
 283   const intx copy_interval = PrefetchCopyIntervalInBytes;
 284   while (cur_obj < end_of_live) {
 285     if (!oop(cur_obj)->is_gc_marked()) {
 286       // mark is pointer to next marked oop
 287       debug_only(prev_obj = cur_obj);
 288       cur_obj = (HeapWord*) oop(cur_obj)->mark()->decode_pointer();
 289       assert(cur_obj > prev_obj, "we should be moving forward through memory");
 290     } else {
 291       // prefetch beyond cur_obj
 292       Prefetch::read(cur_obj, scan_interval);
 293 
 294       // size and destination
 295       size_t size = space->obj_size(cur_obj);
 296       HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
 297 
 298       // prefetch beyond compaction_top
 299       Prefetch::write(compaction_top, copy_interval);
 300 
 301       // copy object and reinit its mark
           // Source and destination may overlap, hence the conjoint copy.
 302       assert(cur_obj != compaction_top, "everything in this pass should be moving");
 303       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
 304       oop(compaction_top)->init_mark();
 305       assert(oop(compaction_top)->klass() != NULL, "should have a class");
 306 
 307       debug_only(prev_obj = cur_obj);
 308       cur_obj += size;
 309     }
 310   }
 311 
 312   // Let's remember if we were empty before we did the compaction.
 313   bool was_empty = space->used_region().is_empty();
 314   // Reset space after compaction is complete
 315   space->reset_after_compaction();
 316   // We do this clear, below, since it has overloaded meanings for some
 317   // space subtypes.  For example, OffsetTableContigSpace's that were
 318   // compacted into will have had their offset table thresholds updated
 319   // continuously, but those that weren't need to have their thresholds
 320   // re-initialized.  Also mangles unused area for debugging.
 321   if (space->used_region().is_empty()) {
 322     if (!was_empty) space->clear(SpaceDecorator::Mangle);
 323   } else {
 324     if (ZapUnusedHeapArea) space->mangle_unused_area();
 325   }
 326 }
 327 
 328 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
< prev index next >