#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)compactibleFreeListSpace.hpp 1.91 07/05/05 17:05:45 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);
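  // Illustrative note (an assumption about typical use, not an API contract):
  // a parallel rescan would normally be set up by first calling
  // initialize_sequential_subtasks_for_rescan(n_threads), which divides the
  // space into strips of roughly rescan_task_size() words; worker threads
  // then claim strips through the SequentialSubTasksDone object returned by
  // conc_par_seq_tasks(). Marking uses marking_task_size() analogously.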

#if CFLS_LAB_REFILL_STATS
  void print_par_alloc_stats();
#endif

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
       object_iterate_careful_m(MemRegion mr,
                                ObjectClosureCareful* cl);
  virtual HeapWord*
       object_iterate_careful(ObjectClosureCareful* cl);
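  // Illustrative sketch only -- a hypothetical caller (names "sp", "mr" and
  // "cl" are placeholders) would interpret a non-NULL result as "stopped
  // early at an unparseable subregion":
  //
  //   HeapWord* stop = sp->object_iterate_careful_m(mr, &cl);
  //   if (stop != NULL) {
  //     // Iteration did not complete; "stop" is the start of the
  //     // subregion that still remains to be done.
  //   }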

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
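  // Note (illustrative expansion only): for a hypothetical closure type
  // "FooClosure" with suffix "_nv" supplied by ALL_SINCE_SAVE_MARKS_CLOSURES,
  // the macro above would declare
  //   void oop_since_save_marks_iterate_nv(FooClosure* blk);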

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }
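  // Illustrative restatement of the encoding described above: hasPromotedMark()
  // corresponds to both low-order mark bits being set, i.e. roughly
  // (mark & 0x3) == 0x3 for objects promoted since the last save_marks().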

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
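  // Illustrative only (hypothetical numbers): if MinChunkSize were 4 words,
  // a request of 2 words would be rounded up to MAX2(2, 4) = 4 and then
  // passed through align_object_size(); requests of at least MinChunkSize
  // are subject only to the alignment.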
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print() const;
  void prepare_for_verify();
  void verify(bool allow_dirty) const;
  void verifyFreeLists() const PRODUCT_RETURN;
  void verifyIndexedFreeLists() const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency() const PRODUCT_RETURN;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as the current
  // number of free chunks, the number of chunks created by splitting a
  // larger chunk or by coalescing smaller chunks, etc.  The counts in
  // the census are used to make decisions on splitting and coalescing
  // of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float sweep_current,
                          float sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
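  //
  // Worked example (hypothetical counts for one chunk size):
  //   num-prev = 100, split-births = 30, split-deaths = 10, num-curr = 80
  //   num-desired = 100 + 30 - 10 - 80 = 40
  // i.e. roughly 40 additional free chunks of this size would be needed to
  // satisfy the demand observed since the end of the previous sweep.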