  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region(),
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }
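
  // Illustrative sketch (an assumption, not part of this interface): a
  // sweeper can bound its walk by used_region() rather than scanning the
  // entire committed region. block_size() is assumed here to return the
  // size of the block (object or free chunk) starting at the given address.
  //
  //   MemRegion mr = space->used_region();
  //   for (HeapWord* cur = mr.start(); cur < mr.end();
  //        cur += space->block_size(cur)) {
  //     // ... examine the block at cur ...
  //   }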

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value); // override

  // Never mangle CompactibleFreeListSpace
  void mangle_unused_area() {}
  void mangle_unused_area_complete() {}

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }
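
  // Usage sketch (illustrative only): callers are expected to hold the
  // free list lock around free-list manipulation. This assumes the
  // MutexLockerEx helper and _no_safepoint_check_flag used elsewhere in
  // CMS code; treat the names as an example, not a specification.
  //
  //   {
  //     MutexLockerEx x(space->freelistLock(),
  //                     Mutex::_no_safepoint_check_flag);
  //     // ... allocate from or coalesce free blocks under the lock ...
  //   }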

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The use of CompactibleFreeListSpace
  // by ConcurrentMarkSweepGeneration for concurrent GCs means that
  // objects in the space may hold references to objects that are no
  // longer valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
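
  // A minimal caller-side sketch (assumed code, not part of this
  // interface): a closure that counts the objects visited. The closure
  // name is hypothetical; ObjectClosure::do_object() is the standard hook.
  //
  //   class CountObjectsClosure : public ObjectClosure {
  //     size_t _count;
  //    public:
  //     CountObjectsClosure() : _count(0) {}
  //     void do_object(oop obj) { _count++; }  // invoked only for objects
  //                                            // deemed safe to visit
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountObjectsClosure cl;
  //   space->safe_object_iterate(&cl);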

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is