1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)compactibleFreeListSpace.cpp 1.144 08/09/06 09:20:55 JVM"
3 #endif
4 /*
5 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
40 MemRegion mr, bool use_adaptive_freelists,
41 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
42 _dictionaryChoice(dictionaryChoice),
43 _adaptive_freelists(use_adaptive_freelists),
44 _bt(bs, mr),
45 // free list locks are in the range of values taken by _lockRank
46 // This range currently is [_leaf+2, _leaf+3]
47 // Note: this requires that CFLspace c'tors
48 // are called serially in the order in which the locks are
49 // acquired in the program text. This is true today.
50 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
51 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
52 "CompactibleFreeListSpace._dict_par_lock", true),
53 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
54 CMSRescanMultiple),
55 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
56 CMSConcMarkMultiple),
57 _collector(NULL)
58 {
59 _bt.set_space(this);
60 initialize(mr, true);
61 // We have all of "mr", all of which we place in the dictionary
62 // as one big chunk. We'll need to decide here which of several
63 // possible alternative dictionary implementations to use. For
64 // now the choice is easy, since we have only one working
65 // implementation, namely, the simple binary tree (splaying
66 // temporarily disabled).
67 switch (dictionaryChoice) {
68 case FreeBlockDictionary::dictionaryBinaryTree:
69 _dictionary = new BinaryTreeDictionary(mr);
70 break;
71 case FreeBlockDictionary::dictionarySplayTree:
72 case FreeBlockDictionary::dictionarySkipList:
73 default:
74 warning("dictionaryChoice: selected option not understood; using"
75 " default BinaryTreeDictionary implementation instead.");
76 _dictionary = new BinaryTreeDictionary(mr);
77 break;
78 }
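  // Record the one big chunk handed to the dictionary above as a (split) birth,
  // so the free-list census starts out consistent with the dictionary contents.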
79 splitBirth(mr.word_size());
80 assert(_dictionary != NULL, "CMS dictionary initialization");
163 // (i.e., cp->space may no longer be "this", so adjust the size again).
164 // Use the virtual method which is not used above to save the virtual
165 // dispatch.
166 adjusted_size = cp->space->adjust_object_size_v(size);
167 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
168 assert(cp->space->minimum_free_block_size() == 0, "just checking");
169 } while (adjusted_size > compaction_max_size);
170 }
171
172 // store the forwarding pointer into the mark word
173 if ((HeapWord*)q != compact_top) {
174 q->forward_to(oop(compact_top));
175 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
176 } else {
177 // if the object isn't moving we can just set the mark to the default
178 // mark and handle it specially later on.
179 q->init_mark();
180 assert(q->forwardee() == NULL, "should be forwarded to NULL");
181 }
182
183 debug_only(MarkSweep::register_live_oop(q, adjusted_size));
184 compact_top += adjusted_size;
185
186 // we need to update the offset table so that the beginnings of objects can be
187 // found during scavenge. Note that we are updating the offset table based on
188 // where the object will be once the compaction phase finishes.
189
190 // Always call cross_threshold(). A contiguous space can only call it when
191 // the compaction_top exceeds the current threshold but not for a
192 // non-contiguous space.
193 cp->threshold =
194 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
195 return compact_top;
196 }
197
198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
199 // and use of single_block instead of alloc_block. The name here is not really
200 // appropriate - maybe a more general name could be invented for both the
201 // contiguous and noncontiguous spaces.
202
203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
776 // Since we hold the free list lock, which protects direct
777 // allocation in this generation by mutators, a free object
778 // will remain free throughout this iteration code.
779 size = fc->size();
780 } else {
781 // Note that the object need not necessarily be initialized,
782 // because (for instance) the free list lock does NOT protect
783 // object initialization. The closure application below must
784 // therefore be correct in the face of uninitialized objects.
785 size = cl->do_object_careful_m(oop(addr), mr);
786 if (size == 0) {
787 // An unparsable object found. Signal early termination.
788 return addr;
789 }
790 }
791 }
792 return NULL;
793 }
794
795
796 HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
797 NOT_PRODUCT(verify_objects_initialized());
798 return _bt.block_start(p);
799 }
800
801 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
802 return _bt.block_start_careful(p);
803 }
804
805 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
806 NOT_PRODUCT(verify_objects_initialized());
807 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
808 // This must be volatile, or else there is a danger that the compiler
809 // will compile the code below into a sometimes-infinite loop, by keeping
810 // the value read the first time in a register.
811 oop o = (oop)p;
812 volatile oop* second_word_addr = o->klass_addr();
813 while (true) {
814 klassOop k = (klassOop)(*second_word_addr);
815 // We must do this until we get a consistent view of the object.
816 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
817 FreeChunk* fc = (FreeChunk*)p;
818 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
819 size_t res = (*sz_addr);
820 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
821 if (k == k2) {
822 assert(res != 0, "Block size should not be 0");
823 return res;
824 }
825 } else if (k != NULL) {
826 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
827 assert(o->is_parsable(), "Should be parsable");
828 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
829 size_t res = o->size_given_klass(k->klass_part());
830 res = adjustObjectSize(res);
831 assert(res != 0, "Block size should not be 0");
832 return res;
833 }
834 }
835 }
836
837 // A variant of the above that uses the Printezis bits for
838 // unparsable but allocated objects. This avoids any possible
839 // stalls waiting for mutators to initialize objects, and is
840 // thus potentially faster than the variant above. However,
841 // this variant may return a zero size for a block that is
842 // under mutation and for which a consistent size cannot be
843 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
844 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
845 const CMSCollector* c)
846 const {
847 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
848 // This must be volatile, or else there is a danger that the compiler
849 // will compile the code below into a sometimes-infinite loop, by keeping
850 // the value read the first time in a register.
851 oop o = (oop)p;
852 volatile oop* second_word_addr = o->klass_addr();
853 DEBUG_ONLY(uint loops = 0;)
854 while (true) {
855 klassOop k = (klassOop)(*second_word_addr);
856 // We must do this until we get a consistent view of the object.
857 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
858 FreeChunk* fc = (FreeChunk*)p;
859 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
860 size_t res = (*sz_addr);
861 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
862 if (k == k2) {
863 assert(res != 0, "Block size should not be 0");
864 assert(loops == 0, "Should be 0");
865 return res;
866 }
867 } else if (k != NULL && o->is_parsable()) {
868 assert(k->is_oop(), "Should really be klass oop.");
869 assert(o->is_oop(), "Should be an oop");
870 size_t res = o->size_given_klass(k->klass_part());
871 res = adjustObjectSize(res);
872 assert(res != 0, "Block size should not be 0");
873 return res;
874 } else {
875 return c->block_size_if_printezis_bits(p);
876 }
877 assert(loops == 0, "Can loop at most once");
878 DEBUG_ONLY(loops++;)
879 }
880 }
881
882 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
883 NOT_PRODUCT(verify_objects_initialized());
884 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
885 FreeChunk* fc = (FreeChunk*)p;
886 if (fc->isFree()) {
887 return fc->size();
888 } else {
889 // Ignore mark word because this may be a recently promoted
890 // object whose mark word is used to chain together grey
891 // objects (the last one would have a null value).
892 assert(oop(p)->is_oop(true), "Should be an oop");
893 return adjustObjectSize(oop(p)->size());
894 }
895 }
896
897 // This implementation assumes that the property of "being an object" is
898 // stable. But being a free chunk may not be (because of parallel
899 // promotion).
900 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
901 FreeChunk* fc = (FreeChunk*)p;
902 assert(is_in_reserved(p), "Should be in space");
903 // When doing a mark-sweep-compact of the CMS generation, this
904 // assertion may fail because prepare_for_compaction() uses
905 // space that is garbage to maintain information on ranges of
906 // live objects so that these live ranges can be moved as a whole.
907 // Comment out this assertion until that problem can be solved
908 // (i.e., that the block start calculation may look at objects
909 // at addresses below "p" in finding the object that contains "p"
910 // and those objects (if garbage) may have been modified to hold
911 // live range information).
912 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
913 klassOop k = oop(p)->klass();
914 intptr_t ki = (intptr_t)k;
915 if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
916 if (k != NULL) {
917 // Ignore mark word because it may have been used to
918 // chain together promoted objects (the last one
919 // would have a null value).
920 assert(oop(p)->is_oop(true), "Should be an oop");
921 return true;
922 } else {
923 return false; // Was not an object at the start of collection.
924 }
925 }
926
927 // Check if the object is alive. This fact is checked either by consulting
928 // the main marking bitmap in the sweeping phase or, if it's a permanent
929 // generation and we're not in the sweeping phase, by checking the
930 // perm_gen_verify_bit_map where we store the "deadness" information if
931 // we did not sweep the perm gen in the most recent previous GC cycle.
932 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
933 assert (block_is_obj(p), "The address should point to an object");
934
935 // If we're sweeping, we use object liveness information from the main bit map
1013 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1014 assert_lock_strong(freelistLock());
1015 HeapWord* res = NULL;
1016 assert(size == adjustObjectSize(size),
1017 "use adjustObjectSize() before calling into allocate()");
1018
1019 if (_adaptive_freelists) {
1020 res = allocate_adaptive_freelists(size);
1021 } else { // non-adaptive free lists
1022 res = allocate_non_adaptive_freelists(size);
1023 }
1024
1025 if (res != NULL) {
1026 // check that res does lie in this space!
1027 assert(is_in_reserved(res), "Not in this space!");
1028 assert(is_aligned((void*)res), "alignment check");
1029
1030 FreeChunk* fc = (FreeChunk*)res;
1031 fc->markNotFree();
1032 assert(!fc->isFree(), "shouldn't be marked free");
1033 assert(oop(fc)->klass() == NULL, "should look uninitialized");
1034 // Verify that the block offset table shows this to
1035 // be a single block, but not one which is unallocated.
1036 _bt.verify_single_block(res, size);
1037 _bt.verify_not_unallocated(res, size);
1038 // mangle a just allocated object with a distinct pattern.
1039 debug_only(fc->mangleAllocated(size));
1040 }
1041
1042 return res;
1043 }
1044
1045 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1046 HeapWord* res = NULL;
1047 // try and use linear allocation for smaller blocks
1048 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1049 // if successful, the following also adjusts block offset table
1050 res = getChunkFromSmallLinearAllocBlock(size);
1051 }
1052 // Else triage to indexed lists for smaller sizes
1053 if (res == NULL) {
1197 FreeChunk* fc;
1198 {
1199 // If GC is parallel, this might be called by several threads.
1200 // This should be rare enough that the locking overhead won't affect
1201 // the sequential code.
1202 MutexLockerEx x(parDictionaryAllocLock(),
1203 Mutex::_no_safepoint_check_flag);
1204 fc = getChunkFromDictionary(size);
1205 }
1206 if (fc != NULL) {
1207 fc->dontCoalesce();
1208 assert(fc->isFree(), "Should be free, but not coalescable");
1209 // Verify that the block offset table shows this to
1210 // be a single block, but not one which is unallocated.
1211 _bt.verify_single_block((HeapWord*)fc, fc->size());
1212 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1213 }
1214 return fc;
1215 }
1216
1217 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
1218 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1219 assert_locked();
1220
1221 // if we are tracking promotions, then first ensure space for
1222 // promotion (including spooling space for saving header if necessary).
1223 // then allocate and copy, then track promoted info if needed.
1224 // When tracking (see PromotionInfo::track()), the mark word may
1225 // be displaced and in this case restoration of the mark word
1226 // occurs in the (oop_since_save_marks_)iterate phase.
1227 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1228 return NULL;
1229 }
1230 // Call the allocate(size_t, bool) form directly to avoid the
1231 // additional call through the allocate(size_t) form. Having
1232 // the compiler inline the call is problematic because allocate(size_t)
1233 // is a virtual method.
1234 HeapWord* res = allocate(adjustObjectSize(obj_size));
1235 if (res != NULL) {
1236 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1237 // if we should be tracking promotions, do so.
1821 assert(noPromotions(), "post-condition violation"); \
1822 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1823 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
1824 assert(_firstIndex == _nextIndex, "empty buffer"); \
1825 }
1826
1827 // This should have been ALL_SINCE_...() just like the others,
1828 // but, because the body of the method above is somewhat longer,
1829 // the MSVC compiler cannot cope; as a workaround, we split the
1830 // macro into its 3 constituent parts below (see original macro
1831 // definition in specializedOopClosures.hpp).
1832 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1833 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1834
1835
1836 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1837 // ugghh... how would one do this efficiently for a non-contiguous space?
1838 guarantee(false, "NYI");
1839 }
1840
1841 bool CompactibleFreeListSpace::linearAllocationWouldFail() {
1842 return _smallLinearAllocBlock._word_size == 0;
1843 }
1844
1845 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1846 // Fix up linear allocation blocks to look like free blocks
1847 repairLinearAllocBlock(&_smallLinearAllocBlock);
1848 }
1849
1850 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1851 assert_locked();
1852 if (blk->_ptr != NULL) {
1853 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1854 "Minimum block size requirement");
1855 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1856 fc->setSize(blk->_word_size);
1857 fc->linkPrev(NULL); // mark as free
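    // Mark the chunk uncoalescable so the sweeper does not merge it away while
    // the allocator still holds a pointer into it (cf. refillLinearAllocBlock()).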
1858 fc->dontCoalesce();
1859 assert(fc->isFree(), "just marked it free");
1860 assert(fc->cantCoalesce(), "just marked it uncoalescable");
1861 }
1892 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1893 assert_locked();
1894 assert(blk->_word_size == 0 && blk->_ptr == NULL,
1895 "linear allocation block should be empty");
1896 FreeChunk* fc;
1897 if (blk->_refillSize < SmallForDictionary &&
1898 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1899 // A linAB's strategy might be to use small sizes to reduce
1900 // fragmentation but still get the benefits of allocation from a
1901 // linAB.
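    // Nothing more to do here: the chunk was already obtained from the
    // indexed free list in the condition above.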
1902 } else {
1903 fc = getChunkFromDictionary(blk->_refillSize);
1904 }
1905 if (fc != NULL) {
1906 blk->_ptr = (HeapWord*)fc;
1907 blk->_word_size = fc->size();
1908 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
1909 }
1910 }
1911
1912 // Support for compaction
1913
1914 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1915 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1916 // prepare_for_compaction() uses the space between live objects
1917 // so that the later phase can skip dead space quickly. So verification
1918 // of the free lists doesn't work after.
1919 }
1920
1921 #define obj_size(q) adjustObjectSize(oop(q)->size())
1922 #define adjust_obj_size(s) adjustObjectSize(s)
1923
1924 void CompactibleFreeListSpace::adjust_pointers() {
1925 // In other versions of adjust_pointers(), a bail out
1926 // based on the amount of live data in the generation
1927 // (i.e., if 0, bail out) may be used.
1928 // Cannot test used() == 0 here because the free lists have already
1929 // been mangled by the compaction.
1930
1931 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
1999 fl->set_hint(h);
2000 if (fl->surplus() > 0) {
2001 h = i;
2002 }
2003 }
2004 }
2005
2006 void CompactibleFreeListSpace::clearFLCensus() {
2007 assert_locked();
2008 int i;
2009 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2010 FreeList *fl = &_indexedFreeList[i];
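    // Remember the population at the end of this sweep; it becomes the
    // "previous sweep" baseline used by the next census.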
2011 fl->set_prevSweep(fl->count());
2012 fl->set_coalBirths(0);
2013 fl->set_coalDeaths(0);
2014 fl->set_splitBirths(0);
2015 fl->set_splitDeaths(0);
2016 }
2017 }
2018
2019 void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
2020 setFLSurplus();
2021 setFLHints();
2022 if (PrintGC && PrintFLSCensus > 0) {
2023 printFLCensus(sweepCt);
2024 }
2025 clearFLCensus();
2026 assert_locked();
2027 _dictionary->endSweepDictCensus(SplitSurplusPercent);
2028 }
2029
2030 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
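  // A size class is over-populated, for coalescing purposes, when its current
  // count already exceeds the desired (coalDesired) population from the census.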
2031 if (size < SmallForDictionary) {
2032 FreeList *fl = &_indexedFreeList[size];
2033 return (fl->coalDesired() < 0) ||
2034 ((int)fl->count() > fl->coalDesired());
2035 } else {
2036 return dictionary()->coalDictOverPopulated(size);
2037 }
2038 }
2039
2040 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2041 assert(size < SmallForDictionary, "Size too large for indexed list");
2042 FreeList *fl = &_indexedFreeList[size];
2043 fl->increment_coalBirths();
2095 }
2096 }
2097
2098 void CompactibleFreeListSpace::splitDeath(size_t size) {
2099 if (size < SmallForDictionary) {
2100 smallSplitDeath(size);
2101 } else {
2102 dictionary()->dictCensusUpdate(size,
2103 true /* split */,
2104 false /* birth */);
2105 }
2106 }
2107
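// Census bookkeeping for splitting a chunk of size "from" into two pieces of
// sizes "to1" and "from - to1": one split death at "from", a split birth at
// each resulting size.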
2108 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2109 size_t to2 = from - to1;
2110 splitDeath(from);
2111 splitBirth(to1);
2112 splitBirth(to2);
2113 }
2114
2115
2116 void CompactibleFreeListSpace::print() const {
2117 tty->print(" CompactibleFreeListSpace");
2118 Space::print();
2119 }
2120
2121 void CompactibleFreeListSpace::prepare_for_verify() {
2122 assert_locked();
2123 repairLinearAllocationBlocks();
2124 // Verify that the SpoolBlocks look like free blocks of
2125 // appropriate sizes... To be done ...
2126 }
2127
2128 class VerifyAllBlksClosure: public BlkClosure {
2129 const CompactibleFreeListSpace* _sp;
2130 const MemRegion _span;
2131
2132 public:
2133 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2134 MemRegion span) : _sp(sp), _span(span) { }
2135
2136 size_t do_blk(HeapWord* addr) {
2137 size_t res;
2138 if (_sp->block_is_obj(addr)) {
2139 oop p = oop(addr);
2140 guarantee(p->is_oop(), "Should be an oop");
2141 res = _sp->adjustObjectSize(p->size());
2142 if (_sp->obj_is_alive(addr)) {
2143 p->verify();
2144 }
2145 } else {
2146 FreeChunk* fc = (FreeChunk*)addr;
2147 res = fc->size();
2148 if (FLSVerifyLists && !fc->cantCoalesce()) {
2149 guarantee(_sp->verifyChunkInFreeLists(fc),
2150 "Chunk should be on a free list");
2151 }
2152 }
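    // A zero size would mean the block iteration makes no forward progress.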
2153 guarantee(res != 0, "Livelock: no rank reduction!");
2154 return res;
2155 }
2156 };
2157
2158 class VerifyAllOopsClosure: public OopClosure {
2159 const CMSCollector* _collector;
2160 const CompactibleFreeListSpace* _sp;
2161 const MemRegion _span;
2162 const bool _past_remark;
2163 const CMSBitMap* _bit_map;
2164
2165 public:
2166 VerifyAllOopsClosure(const CMSCollector* collector,
2167 const CompactibleFreeListSpace* sp, MemRegion span,
2168 bool past_remark, CMSBitMap* bit_map) :
2169 OopClosure(), _collector(collector), _sp(sp), _span(span),
2170 _past_remark(past_remark), _bit_map(bit_map) { }
2171
2172 void do_oop(oop* ptr) {
2173 oop p = *ptr;
2174 if (p != NULL) {
2175 if (_span.contains(p)) { // the interior oop points into CMS heap
2176 if (!_span.contains(ptr)) { // reference from outside CMS heap
2177 // Should be a valid object; the first disjunct below allows
2178 // us to sidestep an assertion in block_is_obj() that insists
2179 // that p be in _sp. Note that several generations (and spaces)
2180 // are spanned by _span (CMS heap) above.
2181 guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
2182 "Should be an object");
2183 guarantee(p->is_oop(), "Should be an oop");
2184 p->verify();
2185 if (_past_remark) {
2186 // Remark has been completed, the object should be marked
2187 _bit_map->isMarked((HeapWord*)p);
2188 }
2189 }
2190 else { // reference within CMS heap
2191 if (_past_remark) {
2192 // Remark has been completed -- so the referent should have
2193 // been marked, if referring object is.
2194 if (_bit_map->isMarked(_collector->block_start(ptr))) {
2195 guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
2196 }
2197 }
2198 }
2199 } else if (_sp->is_in_reserved(ptr)) {
2200 // the reference is from FLS, and points out of FLS
2201 guarantee(p->is_oop(), "Should be an oop");
2202 p->verify();
2203 }
2204 }
2205 }
2206 };
2207
2208 void CompactibleFreeListSpace::verify(bool ignored) const {
2209 assert_lock_strong(&_freelistLock);
2210 verify_objects_initialized();
2211 MemRegion span = _collector->_span;
2212 bool past_remark = (_collector->abstract_state() ==
2213 CMSCollector::Sweeping);
2214
2215 ResourceMark rm;
2216 HandleMark hm;
2217
2218 // Check integrity of CFL data structures
2219 _promoInfo.verify();
2220 _dictionary->verify();
2221 if (FLSVerifyIndexTable) {
2222 verifyIndexedFreeLists();
2223 }
2224 // Check integrity of all objects and free blocks in space
2225 {
2254 _dictionary->verify();
2255 }
2256 if (FLSVerifyIndexTable) {
2257 verifyIndexedFreeLists();
2258 }
2259 }
2260 }
2261 #endif
2262
2263 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2264 size_t i = 0;
2265 for (; i < MinChunkSize; i++) {
2266 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2267 }
2268 for (; i < IndexSetSize; i++) {
2269 verifyIndexedFreeList(i);
2270 }
2271 }
2272
2273 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2274 guarantee(size % 2 == 0, "Odd slots should be empty");
2275 for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
2276 fc = fc->next()) {
2277 guarantee(fc->size() == size, "Size inconsistency");
2278 guarantee(fc->isFree(), "!free?");
2279 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2280 }
2281 }
2282
2283 #ifndef PRODUCT
2284 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2285 assert(_dictionary->minSize() <= IndexSetSize,
2286 "Some sizes can't be allocated without recourse to"
2287 " linear allocation buffers");
2288 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2289 "else MIN_TREE_CHUNK_SIZE is wrong");
2290 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2291 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2292 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2293 "Some for-loops may be incorrectly initialized");
2294 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2295 "For-loops that iterate over IndexSet with stride 2 may be wrong");
2296 }
2297 #endif
2298
2299 void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
2300 assert_lock_strong(&_freelistLock);
2301 ssize_t bfrSurp = 0;
2302 ssize_t surplus = 0;
2303 ssize_t desired = 0;
2304 ssize_t prevSweep = 0;
2305 ssize_t beforeSweep = 0;
2306 ssize_t count = 0;
2307 ssize_t coalBirths = 0;
2308 ssize_t coalDeaths = 0;
2309 ssize_t splitBirths = 0;
2310 ssize_t splitDeaths = 0;
2311 gclog_or_tty->print("end sweep# %d\n", sweepCt);
2312 gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
2313 "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
2314 "%7s\t" "\n",
2315 "size", "bfrsurp", "surplus", "desired", "prvSwep",
2316 "bfrSwep", "count", "cBirths", "cDeaths", "sBirths",
2317 "sDeaths");
2318
2319 size_t totalFree = 0;
2320 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2321 const FreeList *fl = &_indexedFreeList[i];
2322 totalFree += fl->count() * fl->size();
2323
2324 gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t"
2325 "%7d\t" "%7d\t" "%7d\t" "%7d\t"
2326 "%7d\t" "%7d\t" "%7d\t" "\n",
2327 fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(),
2328 fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(),
2329 fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
2330 bfrSurp += fl->bfrSurp();
2331 surplus += fl->surplus();
2332 desired += fl->desired();
2333 prevSweep += fl->prevSweep();
2334 beforeSweep += fl->beforeSweep();
2335 count += fl->count();
2336 coalBirths += fl->coalBirths();
2337 coalDeaths += fl->coalDeaths();
2338 splitBirths += fl->splitBirths();
2339 splitDeaths += fl->splitDeaths();
2340 }
2341 gclog_or_tty->print("%4s\t"
2342 "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
2343 "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
2344 "totl",
2345 bfrSurp, surplus, desired, prevSweep, beforeSweep,
2346 count, coalBirths, coalDeaths, splitBirths, splitDeaths);
2347 gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
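  // "growth" is the net of births over deaths, relative to the previous sweep's
  // population; "deficit" is the shortfall of the current count from the desired count.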
2348 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2349 (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
2350 (prevSweep != 0 ? (double)prevSweep : 1.0),
2351 (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
2352 _dictionary->printDictCensus();
2353 }
2354
2355 // Return the next displaced header, incrementing the pointer and
2356 // recycling spool area as necessary.
2357 markOop PromotionInfo::nextDisplacedHeader() {
2358 assert(_spoolHead != NULL, "promotionInfo inconsistency");
2359 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2360 "Empty spool space: no displaced header can be fetched");
2361 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2362 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2363 // Spool forward
2364 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2365 // forward to next block, recycling this block into spare spool buffer
2366 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2367 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2368 _spoolHead->nextSpoolBlock = _spareSpool;
2369 _spareSpool = _spoolHead;
2370 _spoolHead = tmp;
2371 _firstIndex = 1;
2584 // This locking manages sync with other large object allocations.
2585 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2586 Mutex::_no_safepoint_check_flag);
2587 res = _cfls->getChunkFromDictionaryExact(word_sz);
2588 if (res == NULL) return NULL;
2589 } else {
2590 FreeList* fl = &_indexedFreeList[word_sz];
2591 bool filled = false; //TRAP
2592 if (fl->count() == 0) {
2593 bool filled = true; //TRAP
2594 // Attempt to refill this local free list.
2595 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2596 // If it didn't work, give up.
2597 if (fl->count() == 0) return NULL;
2598 }
2599 res = fl->getChunkAtHead();
2600 assert(res != NULL, "Why was count non-zero?");
2601 }
2602 res->markNotFree();
2603 assert(!res->isFree(), "shouldn't be marked free");
2604 assert(oop(res)->klass() == NULL, "should look uninitialized");
2605 // mangle a just allocated object with a distinct pattern.
2606 debug_only(res->mangleAllocated(word_sz));
2607 return (HeapWord*)res;
2608 }
2609
2610 void CFLS_LAB::retire() {
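  // Return any chunks remaining in this local allocation buffer to the shared
  // indexed free lists, taking the per-size-class lock for each non-empty list.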
2611 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2612 i < CompactibleFreeListSpace::IndexSetSize;
2613 i += CompactibleFreeListSpace::IndexSetStride) {
2614 if (_indexedFreeList[i].count() > 0) {
2615 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2616 Mutex::_no_safepoint_check_flag);
2617 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2618 // Reset this list.
2619 _indexedFreeList[i] = FreeList();
2620 _indexedFreeList[i].set_size(i);
2621 }
2622 }
2623 }
2624
2785 // TRAP
2786 assert(fl->tail()->next() == NULL, "List invariant.");
2787 }
2788
2789 // Set up the space's par_seq_tasks structure for work claiming
2790 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2791 // XXX Need to suitably abstract and generalize this and the next
2792 // method into one.
2793 void
2794 CompactibleFreeListSpace::
2795 initialize_sequential_subtasks_for_rescan(int n_threads) {
2796 // The "size" of each task is fixed according to rescan_task_size.
2797 assert(n_threads > 0, "Unexpected n_threads argument");
2798 const size_t task_size = rescan_task_size();
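  // Carve the used region into fixed-size strides; round up so the final,
  // possibly partial, stride also gets its own task.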
2799 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2800 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2801 assert(n_tasks == 0 ||
2802 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2803 (used_region().start() + n_tasks*task_size >= used_region().end())),
2804 "n_tasks calculation incorrect");
2805
2806 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2807 assert(!pst->valid(), "Clobbering existing data?");
2808 pst->set_par_threads(n_threads);
2809 pst->set_n_tasks((int)n_tasks);
2810 }
2811
2812 // Set up the space's par_seq_tasks structure for work claiming
2813 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2814 void
2815 CompactibleFreeListSpace::
2816 initialize_sequential_subtasks_for_marking(int n_threads,
2817 HeapWord* low) {
2818 // The "size" of each task is fixed according to marking_task_size.
2819 assert(n_threads > 0, "Unexpected n_threads argument");
2820 const size_t task_size = marking_task_size();
2821 assert(task_size > CardTableModRefBS::card_size_in_words &&
2822 (task_size % CardTableModRefBS::card_size_in_words == 0),
2823 "Otherwise arithmetic below would be incorrect");
2824 MemRegion span = _gen->reserved();
2825 if (low != NULL) {
|
1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)compactibleFreeListSpace.cpp 1.144 08/09/06 09:20:55 JVM"
3 #endif
4 /*
5 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
40 MemRegion mr, bool use_adaptive_freelists,
41 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
42 _dictionaryChoice(dictionaryChoice),
43 _adaptive_freelists(use_adaptive_freelists),
44 _bt(bs, mr),
45 // free list locks are in the range of values taken by _lockRank
46 // This range currently is [_leaf+2, _leaf+3]
47 // Note: this requires that CFLspace c'tors
48 // are called serially in the order in which the locks are
49 // acquired in the program text. This is true today.
50 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
51 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
52 "CompactibleFreeListSpace._dict_par_lock", true),
53 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
54 CMSRescanMultiple),
55 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
56 CMSConcMarkMultiple),
57 _collector(NULL)
58 {
59 _bt.set_space(this);
60 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
61 // We have all of "mr", all of which we place in the dictionary
62 // as one big chunk. We'll need to decide here which of several
63 // possible alternative dictionary implementations to use. For
64 // now the choice is easy, since we have only one working
65 // implementation, namely, the simple binary tree (splaying
66 // temporarily disabled).
67 switch (dictionaryChoice) {
68 case FreeBlockDictionary::dictionaryBinaryTree:
69 _dictionary = new BinaryTreeDictionary(mr);
70 break;
71 case FreeBlockDictionary::dictionarySplayTree:
72 case FreeBlockDictionary::dictionarySkipList:
73 default:
74 warning("dictionaryChoice: selected option not understood; using"
75 " default BinaryTreeDictionary implementation instead.");
76 _dictionary = new BinaryTreeDictionary(mr);
77 break;
78 }
79 splitBirth(mr.word_size());
80 assert(_dictionary != NULL, "CMS dictionary initialization");
163 // (i.e., cp->space may no longer be "this", so adjust the size again).
164 // Use the virtual method which is not used above to save the virtual
165 // dispatch.
166 adjusted_size = cp->space->adjust_object_size_v(size);
167 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
168 assert(cp->space->minimum_free_block_size() == 0, "just checking");
169 } while (adjusted_size > compaction_max_size);
170 }
171
172 // store the forwarding pointer into the mark word
173 if ((HeapWord*)q != compact_top) {
174 q->forward_to(oop(compact_top));
175 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
176 } else {
177 // if the object isn't moving we can just set the mark to the default
178 // mark and handle it specially later on.
179 q->init_mark();
180 assert(q->forwardee() == NULL, "should be forwarded to NULL");
181 }
182
183 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
184 compact_top += adjusted_size;
185
186 // we need to update the offset table so that the beginnings of objects can be
187 // found during scavenge. Note that we are updating the offset table based on
188 // where the object will be once the compaction phase finishes.
189
190 // Always call cross_threshold(). A contiguous space can only call it when
191 // the compaction_top exceeds the current threshold but not for a
192 // non-contiguous space.
193 cp->threshold =
194 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
195 return compact_top;
196 }
197
198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
199 // and use of single_block instead of alloc_block. The name here is not really
200 // appropriate - maybe a more general name could be invented for both the
201 // contiguous and noncontiguous spaces.
202
203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
776 // Since we hold the free list lock, which protects direct
777 // allocation in this generation by mutators, a free object
778 // will remain free throughout this iteration code.
779 size = fc->size();
780 } else {
781 // Note that the object need not necessarily be initialized,
782 // because (for instance) the free list lock does NOT protect
783 // object initialization. The closure application below must
784 // therefore be correct in the face of uninitialized objects.
785 size = cl->do_object_careful_m(oop(addr), mr);
786 if (size == 0) {
787 // An unparsable object found. Signal early termination.
788 return addr;
789 }
790 }
791 }
792 return NULL;
793 }
794
795
796 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
797 NOT_PRODUCT(verify_objects_initialized());
798 return _bt.block_start(p);
799 }
800
801 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
802 return _bt.block_start_careful(p);
803 }
804
805 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
806 NOT_PRODUCT(verify_objects_initialized());
807 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
808 // This must be volatile, or else there is a danger that the compiler
809 // will compile the code below into a sometimes-infinite loop, by keeping
810 // the value read the first time in a register.
811 while (true) {
812 // We must do this until we get a consistent view of the object.
813 if (FreeChunk::indicatesFreeChunk(p)) {
814 volatile FreeChunk* fc = (volatile FreeChunk*)p;
815 size_t res = fc->size();
816 // If the object is still a free chunk, return the size, else it
817 // has been allocated so try again.
818 if (FreeChunk::indicatesFreeChunk(p)) {
819 assert(res != 0, "Block size should not be 0");
820 return res;
821 }
822 } else {
823 // must read from what 'p' points to in each loop.
824 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
825 if (k != NULL) {
826 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
827 oop o = (oop)p;
828 assert(o->is_parsable(), "Should be parsable");
829 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
830 size_t res = o->size_given_klass(k->klass_part());
831 res = adjustObjectSize(res);
832 assert(res != 0, "Block size should not be 0");
833 return res;
834 }
835 }
836 }
837 }
838
839 // A variant of the above that uses the Printezis bits for
840 // unparsable but allocated objects. This avoids any possible
841 // stalls waiting for mutators to initialize objects, and is
842 // thus potentially faster than the variant above. However,
843 // this variant may return a zero size for a block that is
844 // under mutation and for which a consistent size cannot be
845 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
846 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
847 const CMSCollector* c)
848 const {
849 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
850 // This must be volatile, or else there is a danger that the compiler
851 // will compile the code below into a sometimes-infinite loop, by keeping
852 // the value read the first time in a register.
853 DEBUG_ONLY(uint loops = 0;)
854 while (true) {
855 // We must do this until we get a consistent view of the object.
856 if (FreeChunk::indicatesFreeChunk(p)) {
857 volatile FreeChunk* fc = (volatile FreeChunk*)p;
858 size_t res = fc->size();
859 if (FreeChunk::indicatesFreeChunk(p)) {
860 assert(res != 0, "Block size should not be 0");
861 assert(loops == 0, "Should be 0");
862 return res;
863 }
864 } else {
865 // must read from what 'p' points to in each loop.
866 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
867 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
868 assert(k->is_oop(), "Should really be klass oop.");
869 oop o = (oop)p;
870 assert(o->is_oop(), "Should be an oop");
871 size_t res = o->size_given_klass(k->klass_part());
872 res = adjustObjectSize(res);
873 assert(res != 0, "Block size should not be 0");
874 return res;
875 } else {
876 return c->block_size_if_printezis_bits(p);
877 }
878 }
879 assert(loops == 0, "Can loop at most once");
880 DEBUG_ONLY(loops++;)
881 }
882 }
883
884 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
885 NOT_PRODUCT(verify_objects_initialized());
886 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
887 FreeChunk* fc = (FreeChunk*)p;
888 if (fc->isFree()) {
889 return fc->size();
890 } else {
891 // Ignore mark word because this may be a recently promoted
892 // object whose mark word is used to chain together grey
893 // objects (the last one would have a null value).
894 assert(oop(p)->is_oop(true), "Should be an oop");
895 return adjustObjectSize(oop(p)->size());
896 }
897 }
898
899 // This implementation assumes that the property of "being an object" is
900 // stable. But being a free chunk may not be (because of parallel
901 // promotion).
902 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
903 FreeChunk* fc = (FreeChunk*)p;
904 assert(is_in_reserved(p), "Should be in space");
905 // When doing a mark-sweep-compact of the CMS generation, this
906 // assertion may fail because prepare_for_compaction() uses
907 // space that is garbage to maintain information on ranges of
908 // live objects so that these live ranges can be moved as a whole.
909 // Comment out this assertion until that problem can be solved
910 // (i.e., that the block start calculation may look at objects
911 // at addresses below "p" in finding the object that contains "p"
912 // and those objects (if garbage) may have been modified to hold
913 // live range information).
914 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
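  // If the word pattern marks this block as a free chunk, it is not an object;
  // check this before reading the klass word, which is not valid for a free chunk.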
915 if (FreeChunk::indicatesFreeChunk(p)) return false;
916 klassOop k = oop(p)->klass_or_null();
917 if (k != NULL) {
918 // Ignore mark word because it may have been used to
919 // chain together promoted objects (the last one
920 // would have a null value).
921 assert(oop(p)->is_oop(true), "Should be an oop");
922 return true;
923 } else {
924 return false; // Was not an object at the start of collection.
925 }
926 }
927
928 // Check if the object is alive. This fact is checked either by consulting
929 // the main marking bitmap in the sweeping phase or, if it's a permanent
930 // generation and we're not in the sweeping phase, by checking the
931 // perm_gen_verify_bit_map where we store the "deadness" information if
932 // we did not sweep the perm gen in the most recent previous GC cycle.
933 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
934 assert (block_is_obj(p), "The address should point to an object");
935
936 // If we're sweeping, we use object liveness information from the main bit map
1014 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1015 assert_lock_strong(freelistLock());
1016 HeapWord* res = NULL;
1017 assert(size == adjustObjectSize(size),
1018 "use adjustObjectSize() before calling into allocate()");
1019
1020 if (_adaptive_freelists) {
1021 res = allocate_adaptive_freelists(size);
1022 } else { // non-adaptive free lists
1023 res = allocate_non_adaptive_freelists(size);
1024 }
1025
1026 if (res != NULL) {
1027 // check that res does lie in this space!
1028 assert(is_in_reserved(res), "Not in this space!");
1029 assert(is_aligned((void*)res), "alignment check");
1030
1031 FreeChunk* fc = (FreeChunk*)res;
1032 fc->markNotFree();
1033 assert(!fc->isFree(), "shouldn't be marked free");
1034 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1035 // Verify that the block offset table shows this to
1036 // be a single block, but not one which is unallocated.
1037 _bt.verify_single_block(res, size);
1038 _bt.verify_not_unallocated(res, size);
1039 // mangle a just allocated object with a distinct pattern.
1040 debug_only(fc->mangleAllocated(size));
1041 }
1042
1043 return res;
1044 }
1045
1046 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1047 HeapWord* res = NULL;
1048 // try and use linear allocation for smaller blocks
1049 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1050 // if successful, the following also adjusts block offset table
1051 res = getChunkFromSmallLinearAllocBlock(size);
1052 }
1053 // Else triage to indexed lists for smaller sizes
1054 if (res == NULL) {
1198 FreeChunk* fc;
1199 {
1200 // If GC is parallel, this might be called by several threads.
1201 // This should be rare enough that the locking overhead won't affect
1202 // the sequential code.
1203 MutexLockerEx x(parDictionaryAllocLock(),
1204 Mutex::_no_safepoint_check_flag);
1205 fc = getChunkFromDictionary(size);
1206 }
1207 if (fc != NULL) {
1208 fc->dontCoalesce();
1209 assert(fc->isFree(), "Should be free, but not coalescable");
1210 // Verify that the block offset table shows this to
1211 // be a single block, but not one which is unallocated.
1212 _bt.verify_single_block((HeapWord*)fc, fc->size());
1213 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1214 }
1215 return fc;
1216 }
1217
1218 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1219 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1220 assert_locked();
1221
1222 // if we are tracking promotions, then first ensure space for
1223 // promotion (including spooling space for saving header if necessary).
1224 // then allocate and copy, then track promoted info if needed.
1225 // When tracking (see PromotionInfo::track()), the mark word may
1226 // be displaced and in this case restoration of the mark word
1227 // occurs in the (oop_since_save_marks_)iterate phase.
1228 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1229 return NULL;
1230 }
1231 // Call the allocate(size_t, bool) form directly to avoid the
1232 // additional call through the allocate(size_t) form. Having
1233 // the compiler inline the call is problematic because allocate(size_t)
1234 // is a virtual method.
1235 HeapWord* res = allocate(adjustObjectSize(obj_size));
1236 if (res != NULL) {
1237 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1238 // if we should be tracking promotions, do so.
1822 assert(noPromotions(), "post-condition violation"); \
1823 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1824 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
1825 assert(_firstIndex == _nextIndex, "empty buffer"); \
1826 }
1827
1828 // This should have been ALL_SINCE_...() just like the others,
1829 // but, because the body of the method above is somewhat longer,
1830 // the MSVC compiler cannot cope; as a workaround, we split the
1831 // macro into its 3 constituent parts below (see original macro
1832 // definition in specializedOopClosures.hpp).
1833 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1834 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1835
1836
1837 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1838 // ugghh... how would one do this efficiently for a non-contiguous space?
1839 guarantee(false, "NYI");
1840 }
1841
1842 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1843 return _smallLinearAllocBlock._word_size == 0;
1844 }
1845
1846 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1847 // Fix up linear allocation blocks to look like free blocks
1848 repairLinearAllocBlock(&_smallLinearAllocBlock);
1849 }
1850
1851 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1852 assert_locked();
1853 if (blk->_ptr != NULL) {
1854 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1855 "Minimum block size requirement");
1856 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1857 fc->setSize(blk->_word_size);
1858 fc->linkPrev(NULL); // mark as free
1859 fc->dontCoalesce();
1860 assert(fc->isFree(), "just marked it free");
1861 assert(fc->cantCoalesce(), "just marked it uncoalescable");
1862 }
1893 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1894 assert_locked();
1895 assert(blk->_word_size == 0 && blk->_ptr == NULL,
1896 "linear allocation block should be empty");
1897 FreeChunk* fc;
1898 if (blk->_refillSize < SmallForDictionary &&
1899 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1900 // A linAB's strategy might be to use small sizes to reduce
1901 // fragmentation but still get the benefits of allocation from a
1902 // linAB.
1903 } else {
1904 fc = getChunkFromDictionary(blk->_refillSize);
1905 }
1906 if (fc != NULL) {
1907 blk->_ptr = (HeapWord*)fc;
1908 blk->_word_size = fc->size();
1909 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
1910 }
1911 }
1912
1913 // Support for concurrent collection policy decisions.
1914 bool CompactibleFreeListSpace::should_concurrent_collect() const {
1915 // In the future we might want to add in fragmentation stats --
1916 // including erosion of the "mountain" into this decision as well.
1917 return !adaptive_freelists() && linearAllocationWouldFail();
1918 }
1919
1920 // Support for compaction
1921
1922 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1923 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1924 // prepare_for_compaction() uses the space between live objects
1925 // so that the later phase can skip dead space quickly. So verification
1926 // of the free lists doesn't work after.
1927 }
1928
1929 #define obj_size(q) adjustObjectSize(oop(q)->size())
1930 #define adjust_obj_size(s) adjustObjectSize(s)
1931
1932 void CompactibleFreeListSpace::adjust_pointers() {
1933 // In other versions of adjust_pointers(), a bail out
1934 // based on the amount of live data in the generation
1935 // (i.e., if 0, bail out) may be used.
1936 // Cannot test used() == 0 here because the free lists have already
1937 // been mangled by the compaction.
1938
1939 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2007 fl->set_hint(h);
2008 if (fl->surplus() > 0) {
2009 h = i;
2010 }
2011 }
2012 }
2013
2014 void CompactibleFreeListSpace::clearFLCensus() {
2015 assert_locked();
2016 int i;
2017 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2018 FreeList *fl = &_indexedFreeList[i];
2019 fl->set_prevSweep(fl->count());
2020 fl->set_coalBirths(0);
2021 fl->set_coalDeaths(0);
2022 fl->set_splitBirths(0);
2023 fl->set_splitDeaths(0);
2024 }
2025 }
2026
2027 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2028 setFLSurplus();
2029 setFLHints();
2030 if (PrintGC && PrintFLSCensus > 0) {
2031 printFLCensus(sweep_count);
2032 }
2033 clearFLCensus();
2034 assert_locked();
2035 _dictionary->endSweepDictCensus(SplitSurplusPercent);
2036 }
2037
2038 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2039 if (size < SmallForDictionary) {
2040 FreeList *fl = &_indexedFreeList[size];
2041 return (fl->coalDesired() < 0) ||
2042 ((int)fl->count() > fl->coalDesired());
2043 } else {
2044 return dictionary()->coalDictOverPopulated(size);
2045 }
2046 }
2047
2048 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2049 assert(size < SmallForDictionary, "Size too large for indexed list");
2050 FreeList *fl = &_indexedFreeList[size];
2051 fl->increment_coalBirths();
2103 }
2104 }
2105
2106 void CompactibleFreeListSpace::splitDeath(size_t size) {
2107 if (size < SmallForDictionary) {
2108 smallSplitDeath(size);
2109 } else {
2110 dictionary()->dictCensusUpdate(size,
2111 true /* split */,
2112 false /* birth */);
2113 }
2114 }
2115
2116 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2117 size_t to2 = from - to1;
2118 splitDeath(from);
2119 splitBirth(to1);
2120 splitBirth(to2);
2121 }
2122
2123 void CompactibleFreeListSpace::print() const {
2124 tty->print(" CompactibleFreeListSpace");
2125 Space::print();
2126 }
2127
2128 void CompactibleFreeListSpace::prepare_for_verify() {
2129 assert_locked();
2130 repairLinearAllocationBlocks();
2131 // Verify that the SpoolBlocks look like free blocks of
2132 // appropriate sizes... To be done ...
2133 }
2134
2135 class VerifyAllBlksClosure: public BlkClosure {
2136 private:
2137 const CompactibleFreeListSpace* _sp;
2138 const MemRegion _span;
2139
2140 public:
2141 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2142 MemRegion span) : _sp(sp), _span(span) { }
2143
2144 virtual size_t do_blk(HeapWord* addr) {
2145 size_t res;
2146 if (_sp->block_is_obj(addr)) {
2147 oop p = oop(addr);
2148 guarantee(p->is_oop(), "Should be an oop");
2149 res = _sp->adjustObjectSize(p->size());
2150 if (_sp->obj_is_alive(addr)) {
2151 p->verify();
2152 }
2153 } else {
2154 FreeChunk* fc = (FreeChunk*)addr;
2155 res = fc->size();
2156 if (FLSVerifyLists && !fc->cantCoalesce()) {
2157 guarantee(_sp->verifyChunkInFreeLists(fc),
2158 "Chunk should be on a free list");
2159 }
2160 }
2161 guarantee(res != 0, "Livelock: no rank reduction!");
2162 return res;
2163 }
2164 };
2165
2166 class VerifyAllOopsClosure: public OopClosure {
2167 private:
2168 const CMSCollector* _collector;
2169 const CompactibleFreeListSpace* _sp;
2170 const MemRegion _span;
2171 const bool _past_remark;
2172 const CMSBitMap* _bit_map;
2173
2174 protected:
2175 void do_oop(void* p, oop obj) {
2176 if (_span.contains(obj)) { // the interior oop points into CMS heap
2177 if (!_span.contains(p)) { // reference from outside CMS heap
2178 // Should be a valid object; the first disjunct below allows
2179 // us to sidestep an assertion in block_is_obj() that insists
2180 // that p be in _sp. Note that several generations (and spaces)
2181 // are spanned by _span (CMS heap) above.
2182 guarantee(!_sp->is_in_reserved(obj) ||
2183 _sp->block_is_obj((HeapWord*)obj),
2184 "Should be an object");
2185 guarantee(obj->is_oop(), "Should be an oop");
2186 obj->verify();
2187 if (_past_remark) {
2188 // Remark has been completed, the object should be marked
2189 _bit_map->isMarked((HeapWord*)obj);
2190 }
2191 } else { // reference within CMS heap
2192 if (_past_remark) {
2193 // Remark has been completed -- so the referent should have
2194 // been marked, if referring object is.
2195 if (_bit_map->isMarked(_collector->block_start(p))) {
2196 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2197 }
2198 }
2199 }
2200 } else if (_sp->is_in_reserved(p)) {
2201 // the reference is from FLS, and points out of FLS
2202 guarantee(obj->is_oop(), "Should be an oop");
2203 obj->verify();
2204 }
2205 }
2206
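  // Common helper for both narrow and wide oop fields: load the field and, if
  // it is non-NULL, decode it and hand the (location, object) pair to do_oop above.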
2207 template <class T> void do_oop_work(T* p) {
2208 T heap_oop = oopDesc::load_heap_oop(p);
2209 if (!oopDesc::is_null(heap_oop)) {
2210 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2211 do_oop(p, obj);
2212 }
2213 }
2214
2215 public:
2216 VerifyAllOopsClosure(const CMSCollector* collector,
2217 const CompactibleFreeListSpace* sp, MemRegion span,
2218 bool past_remark, CMSBitMap* bit_map) :
2219 OopClosure(), _collector(collector), _sp(sp), _span(span),
2220 _past_remark(past_remark), _bit_map(bit_map) { }
2221
2222 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2223 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2224 };
2225
2226 void CompactibleFreeListSpace::verify(bool ignored) const {
2227 assert_lock_strong(&_freelistLock);
2228 verify_objects_initialized();
2229 MemRegion span = _collector->_span;
2230 bool past_remark = (_collector->abstract_state() ==
2231 CMSCollector::Sweeping);
2232
2233 ResourceMark rm;
2234 HandleMark hm;
2235
2236 // Check integrity of CFL data structures
2237 _promoInfo.verify();
2238 _dictionary->verify();
2239 if (FLSVerifyIndexTable) {
2240 verifyIndexedFreeLists();
2241 }
2242 // Check integrity of all objects and free blocks in space
2243 {
2272 _dictionary->verify();
2273 }
2274 if (FLSVerifyIndexTable) {
2275 verifyIndexedFreeLists();
2276 }
2277 }
2278 }
2279 #endif
2280
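// Sizes below MinChunkSize are never kept on the indexed free lists, so
// those slots must be empty; each remaining slot is checked in turn.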
2281 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2282 size_t i = 0;
2283 for (; i < MinChunkSize; i++) {
2284 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2285 }
2286 for (; i < IndexSetSize; i++) {
2287 verifyIndexedFreeList(i);
2288 }
2289 }
2290
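// Check a single indexed free list: every chunk on it must have the
// list's size, be marked free, and be correctly doubly linked.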
2291 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2292 FreeChunk* fc = _indexedFreeList[size].head();
2293 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
2294 for (; fc != NULL; fc = fc->next()) {
2295 guarantee(fc->size() == size, "Size inconsistency");
2296 guarantee(fc->isFree(), "!free?");
2297 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2298 }
2299 }
2300
2301 #ifndef PRODUCT
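// Debug-only sanity checks on the relationships between the dictionary's
// minimum size, the tree chunk size, and the indexed free list geometry
// (start, stride, size) that the allocation and iteration code rely on.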
2302 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2303 assert(_dictionary->minSize() <= IndexSetSize,
2304 "Some sizes can't be allocated without recourse to"
2305 " linear allocation buffers");
2306 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2307 "else MIN_TREE_CHUNK_SIZE is wrong");
2308 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2309 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2310 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2311 "Some for-loops may be incorrectly initialized");
2312 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2313 "For-loops that iterate over IndexSet with stride 2 may be wrong");
2314 }
2315 #endif
2316
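// Print a per-size census of the indexed free lists, followed by a TOTAL
// line, the total free space held in the indexed lists, simple
// growth/deficit ratios derived from the per-sweep birth/death counts,
// and finally the dictionary's own census.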
2317 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2318 assert_lock_strong(&_freelistLock);
2319 FreeList total;
2320 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2321 FreeList::print_labels_on(gclog_or_tty, "size");
2322 size_t totalFree = 0;
2323 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2324 const FreeList *fl = &_indexedFreeList[i];
2325 totalFree += fl->count() * fl->size();
2326 if (i % (40*IndexSetStride) == 0) {
2327 FreeList::print_labels_on(gclog_or_tty, "size");
2328 }
2329 fl->print_on(gclog_or_tty);
2330 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
2331 total.set_surplus( total.surplus() + fl->surplus() );
2332 total.set_desired( total.desired() + fl->desired() );
2333 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
2334 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2335 total.set_count( total.count() + fl->count() );
2336 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
2337 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
2338 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2339 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2340 }
2341 total.print_on(gclog_or_tty, "TOTAL");
2342 gclog_or_tty->print_cr("Total free in indexed lists "
2343 SIZE_FORMAT " words", totalFree);
2344 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2345 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2346 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2347 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2348 _dictionary->printDictCensus();
2349 }
2350
2351 // Return the next displaced header, incrementing the pointer and
2352 // recycling spool area as necessary.
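// When the last slot of the head spool block is consumed, that block is
// pushed onto the spare-block list for reuse and reading resumes at
// index 1 of the next block.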
2353 markOop PromotionInfo::nextDisplacedHeader() {
2354 assert(_spoolHead != NULL, "promotionInfo inconsistency");
2355 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2356 "Empty spool space: no displaced header can be fetched");
2357 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2358 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2359 // Spool forward
2360 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2361 // forward to next block, recycling this block into spare spool buffer
2362 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2363 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2364 _spoolHead->nextSpoolBlock = _spareSpool;
2365 _spareSpool = _spoolHead;
2366 _spoolHead = tmp;
2367 _firstIndex = 1;
2580 // This lock synchronizes with other large-object allocations from the dictionary.
2581 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2582 Mutex::_no_safepoint_check_flag);
2583 res = _cfls->getChunkFromDictionaryExact(word_sz);
2584 if (res == NULL) return NULL;
2585 } else {
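// Small request: satisfy it from this thread's indexed free list for
// word_sz, refilling that list from the shared space if it is empty.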
2586 FreeList* fl = &_indexedFreeList[word_sz];
2587 bool filled = false; //TRAP
2588 if (fl->count() == 0) {
2589 filled = true; //TRAP -- set the flag declared above rather than shadowing it
2590 // Attempt to refill this local free list.
2591 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2592 // If it didn't work, give up.
2593 if (fl->count() == 0) return NULL;
2594 }
2595 res = fl->getChunkAtHead();
2596 assert(res != NULL, "Why was count non-zero?");
2597 }
2598 res->markNotFree();
2599 assert(!res->isFree(), "shouldn't be marked free");
2600 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2601 // Mangle a just-allocated object with a distinct pattern.
2602 debug_only(res->mangleAllocated(word_sz));
2603 return (HeapWord*)res;
2604 }
2605
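// Return any chunks still held on this allocation buffer's local indexed
// free lists to the shared space, taking the per-list parallel lock for
// each non-empty list, and reset the local lists.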
2606 void CFLS_LAB::retire() {
2607 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2608 i < CompactibleFreeListSpace::IndexSetSize;
2609 i += CompactibleFreeListSpace::IndexSetStride) {
2610 if (_indexedFreeList[i].count() > 0) {
2611 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2612 Mutex::_no_safepoint_check_flag);
2613 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2614 // Reset this list.
2615 _indexedFreeList[i] = FreeList();
2616 _indexedFreeList[i].set_size(i);
2617 }
2618 }
2619 }
2620
2781 // TRAP
2782 assert(fl->tail()->next() == NULL, "List invariant.");
2783 }
2784
2785 // Set up the space's par_seq_tasks structure for work claiming
2786 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2787 // XXX Need to suitably abstract and generalize this and the next
2788 // method into one.
2789 void
2790 CompactibleFreeListSpace::
2791 initialize_sequential_subtasks_for_rescan(int n_threads) {
2792 // The "size" of each task is fixed according to rescan_task_size.
2793 assert(n_threads > 0, "Unexpected n_threads argument");
2794 const size_t task_size = rescan_task_size();
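// Round up so that enough task_size-word strides cover the used region.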
2795 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2796 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2797 assert(n_tasks == 0 ||
2798 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2799 (used_region().start() + n_tasks*task_size >= used_region().end())),
2800 "n_tasks calculation incorrect");
2801 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2802 assert(!pst->valid(), "Clobbering existing data?");
2803 pst->set_par_threads(n_threads);
2804 pst->set_n_tasks((int)n_tasks);
2805 }
2806
2807 // Set up the space's par_seq_tasks structure for work claiming
2808 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
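// If "low" is non-NULL it is used below to restrict the region covered
// by the marking tasks.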
2809 void
2810 CompactibleFreeListSpace::
2811 initialize_sequential_subtasks_for_marking(int n_threads,
2812 HeapWord* low) {
2813 // The "size" of each task is fixed according to marking_task_size.
2814 assert(n_threads > 0, "Unexpected n_threads argument");
2815 const size_t task_size = marking_task_size();
2816 assert(task_size > CardTableModRefBS::card_size_in_words &&
2817 (task_size % CardTableModRefBS::card_size_in_words == 0),
2818 "Otherwise arithmetic below would be incorrect");
2819 MemRegion span = _gen->reserved();
2820 if (low != NULL) {
|