59 class G1ParScanThreadState;
60 class ObjectClosure;
61 class SpaceClosure;
62 class CompactibleSpaceClosure;
63 class Space;
64 class G1CollectorPolicy;
65 class GenRemSet;
66 class G1RemSet;
67 class HeapRegionRemSetIterator;
68 class ConcurrentMark;
69 class ConcurrentMarkThread;
70 class ConcurrentG1Refine;
71 class ConcurrentGCTimer;
72 class GenerationCounters;
73 class STWGCTimer;
74 class G1NewTracer;
75 class G1OldTracer;
76 class EvacuationFailedInfo;
77 class nmethod;
78 class Ticks;
79
80 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
81 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
82
83 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
84 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
85
86 class YoungList : public CHeapObj<mtGC> {
87 private:
88 G1CollectedHeap* _g1h;
89
90 HeapRegion* _head;
91
92 HeapRegion* _survivor_head;
93 HeapRegion* _survivor_tail;
94
95 HeapRegion* _curr;
96
97 uint _length;
98 uint _survivor_length;
187 friend class SurvivorGCAllocRegion;
188 friend class OldGCAllocRegion;
189 friend class G1Allocator;
190
191 // Closures used in implementation.
192 friend class G1ParScanThreadState;
193 friend class G1ParTask;
194 friend class G1ParGCAllocator;
195 friend class G1PrepareCompactClosure;
196
197 // Other related classes.
198 friend class HeapRegionClaimer;
199
200 // Testing classes.
201 friend class G1CheckCSetFastTableClosure;
202
203 private:
204 // The one and only G1CollectedHeap, so static functions can find it.
205 static G1CollectedHeap* _g1h;
206
207 static size_t _humongous_object_threshold_in_words;
208
209 // The secondary free list which contains regions that have been
210 // freed up during the cleanup process. This will be appended to
211 // the master free list when appropriate.
212 FreeRegionList _secondary_free_list;
213
214 // It keeps track of the old regions.
215 HeapRegionSet _old_set;
216
217 // It keeps track of the humongous regions.
218 HeapRegionSet _humongous_set;
219
220 void clear_humongous_is_live_table();
221 void eagerly_reclaim_humongous_regions();
222
223 // The number of regions we could create by expansion.
224 uint _expansion_regions;
225
226 // The block offset table for the G1 heap.
588 // failed allocation request (including collection, expansion, etc.)
589 HeapWord* satisfy_failed_allocation(size_t word_size,
590 AllocationContext_t context,
591 bool* succeeded);
592
593 // Attempts to expand the heap sufficiently
594 // to support an allocation of the given "word_size". If
595 // successful, perform the allocation and return the address of the
596 // allocated block, or else "NULL".
597 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
598
599 // Process any reference objects discovered during
600 // an incremental evacuation pause.
601 void process_discovered_references(uint no_of_gc_workers);
602
603 // Enqueue any remaining discovered references
604 // after processing.
605 void enqueue_discovered_references(uint no_of_gc_workers);
606
607 public:
608
// Accessor for the heap's G1Allocator (_allocator is set up elsewhere in this class).
609 G1Allocator* allocator() {
610 return _allocator;
611 }
612
// Accessor for the G1 monitoring support object.
// Asserts that _g1mm has been initialized before it is handed out.
613 G1MonitoringSupport* g1mm() {
614 assert(_g1mm != NULL, "should have been initialized");
615 return _g1mm;
616 }
617
618 // Expand the garbage-first heap by at least the given size (in bytes!).
619 // Returns true if the heap was expanded by the requested amount;
620 // false otherwise.
621 // (Rounds up to a HeapRegion boundary.)
622 bool expand(size_t expand_bytes);
623
624 // Returns the PLAB statistics for a given destination.
625 inline PLABStats* alloc_buffer_stats(InCSetState dest);
626
627 // Determines PLAB size for a given destination.
628 inline size_t desired_plab_sz(InCSetState dest);
629
630 inline AllocationContextStats& allocation_context_stats();
631
632 // Do anything common to GC's.
633 virtual void gc_prologue(bool full);
634 virtual void gc_epilogue(bool full);
635
636 inline void set_humongous_is_live(oop obj);
637
// Queries the _humongous_is_live table for the given region index.
638 bool humongous_is_live(uint region) {
639 return _humongous_is_live.is_live(region);
640 }
641
642 // Returns whether the given region (which must be a humongous (start) region)
643 // is to be considered conservatively live regardless of any other conditions.
644 bool humongous_region_is_always_live(uint index);
645 // Returns whether the given region (which must be a humongous (start) region)
646 // is considered a candidate for eager reclamation.
647 bool humongous_region_is_candidate(uint index);
648 // Register the given region to be part of the collection set.
649 inline void register_humongous_region_with_cset(uint index);
650 // Register regions with humongous objects (actually on the start region) in
651 // the in_cset_fast_test table.
652 void register_humongous_regions_with_cset();
653 // We register a region with the fast "in collection set" test. We
654 // simply set to true the array slot corresponding to this region.
983 // update the remembered sets of the regions in the collection
984 // set in the event of an evacuation failure.
// Accessor for _into_cset_dirty_card_queue_set; see the preceding comment for its role.
985 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
986 { return _into_cset_dirty_card_queue_set; }
987
988 // Create a G1CollectedHeap with the specified policy.
989 // Must call the initialize method afterwards.
990 // May not return if something goes wrong.
991 G1CollectedHeap(G1CollectorPolicy* policy);
992
993 // Initialize the G1CollectedHeap to have the initial and
994 // maximum sizes and remembered and barrier sets
995 // specified by the policy object.
996 jint initialize();
997
998 virtual void stop();
999
1000 // Return the (conservative) maximum heap alignment for any G1 heap
1001 static size_t conservative_max_heap_alignment();
1002
1003 // Initialize weak reference processing.
1004 virtual void ref_processing_init();
1005
1006 // Explicitly import set_par_threads into this scope
1007 using SharedHeap::set_par_threads;
1008 // Set _n_par_threads according to a policy TBD.
1009 void set_par_threads();
1010
// Reports this heap's kind so generic CollectedHeap clients can identify G1.
1011 virtual CollectedHeap::Name kind() const {
1012 return CollectedHeap::G1CollectedHeap;
1013 }
1014
1015 // The current policy object for the collector.
// Accessor for the _g1_policy field.
1016 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1017
// NOTE(review): the C-style cast to CollectorPolicy* behaves like an implicit
// upcast only if G1CollectorPolicy's definition is visible here; with just the
// forward declaration it silently degrades to a reinterpret_cast. Prefer an
// implicit conversion or static_cast once the required include is confirmed.
1018 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
1019
1020 // Adaptive size policy. No such thing for g1.
// G1 reports no adaptive size policy; generic callers must handle NULL.
1021 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1022
1097 #ifndef PRODUCT
1098 // Make sure that the given bitmap has no marked objects in the
1099 // range [from,limit). If it does, print an error message and return
1100 // false. Otherwise, just return true. bitmap_name should be "prev"
1101 // or "next".
1102 bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1103 HeapWord* from, HeapWord* limit);
1104
1105 // Verify that the prev / next bitmap range [tams,end) for the given
1106 // region has no marks. Return true if all is well, false if errors
1107 // are detected.
1108 bool verify_bitmaps(const char* caller, HeapRegion* hr);
1109
1110 // True if the heap_lock is held by a non-gc thread invoking a gc
1111 // operation.
1112 bool _thread_holds_heap_lock_for_gc;
1113 // Returns true if the calling thread holds the heap lock,
1114 // or the calling thread is a par gc thread and the heap_lock is held
1115 // by the vm thread doing a gc operation.
1116 bool heap_lock_held_for_gc();
1117
// Records whether the heap lock is held for a GC (see heap_lock_held_for_gc()).
// NOTE(review): this definition exists only under #ifndef PRODUCT; any caller
// compiled into product builds will fail to resolve it — confirm all callers
// are likewise debug-only, or guard the call sites.
1118 void set_heap_lock_held_for_gc(bool value) { _thread_holds_heap_lock_for_gc = value; }
1119
1120 #endif // PRODUCT
1121
1122 // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1123 // the given region do not have any spurious marks. If errors are
1124 // detected, print appropriate error messages and crash.
1125 void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1126
1127 // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1128 // have any spurious marks. If errors are detected, print
1129 // appropriate error messages and crash.
1130 void check_bitmaps(const char* caller) PRODUCT_RETURN;
1131
1132 // Do sanity check on the contents of the in-cset fast test table.
1133 bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
1134
1135 // verify_region_sets() performs verification over the region
1136 // lists. It will be compiled in the product code to be used when
1137 // necessary (i.e., during heap verification).
1138 void verify_region_sets();
1139
1140 // verify_region_sets_optional() is planted in the code for
1141 // list verification in non-product builds (and it can be enabled in
1256 return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1257 }
1258
1259 // This resets the card table to all zeros. It is used after
1260 // a collection pause which used the card table to claim cards.
1261 void cleanUpCardTable();
1262
1263 // Iteration functions.
1264
1265 // Iterate over all the ref-containing fields of all objects, calling
1266 // "cl.do_oop" on each.
1267 virtual void oop_iterate(ExtendedOopClosure* cl);
1268
1269 // Iterate over all objects, calling "cl.do_object" on each.
1270 virtual void object_iterate(ObjectClosure* cl);
1271
// The "safe" variant simply forwards to object_iterate(); no extra filtering
// is applied here.
1272 virtual void safe_object_iterate(ObjectClosure* cl) {
1273 object_iterate(cl);
1274 }
1275
1276 // Iterate over all spaces in use in the heap, in ascending address order.
1277 virtual void space_iterate(SpaceClosure* cl);
1278
1279 // Iterate over heap regions, in address order, terminating the
1280 // iteration early if the "doHeapRegion" method returns "true".
1281 void heap_region_iterate(HeapRegionClosure* blk) const;
1282
1283 // Return the region with the given index. It assumes the index is valid.
1284 inline HeapRegion* region_at(uint index) const;
1285
1286 // Calculate the region index of the given address. Given address must be
1287 // within the heap.
1288 inline uint addr_to_region(HeapWord* addr) const;
1289
1290 inline HeapWord* bottom_addr_for_region(uint index) const;
1291
1292 // Iterate over the heap regions in parallel. Assumes that this will be called
1293 // in parallel by ParallelGCThreads worker threads with distinct worker ids
1294 // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1295 // to each of the regions, by attempting to claim the region using the
1296 // HeapRegionClaimer and, if successful, applying the closure to the claimed
1297 // region. The concurrent argument should be set to true if iteration is
1298 // performed concurrently, during which no assumptions are made for consistent
1303 bool concurrent = false) const;
1304
1305 // Clear the cached cset start regions and (more importantly)
1306 // the time stamps. Called when we reset the GC time stamp.
1307 void clear_cset_start_regions();
1308
1309 // Given the id of a worker, obtain or calculate a suitable
1310 // starting region for iterating over the current collection set.
1311 HeapRegion* start_cset_region_for_worker(uint worker_i);
1312
1313 // Iterate over the regions (if any) in the current collection set.
1314 void collection_set_iterate(HeapRegionClosure* blk);
1315
1316 // As above but starting from region r
1317 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1318
1319 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1320
1321 // A CollectedHeap will contain some number of spaces. This finds the
1322 // space containing a given address, or else returns NULL.
1323 virtual Space* space_containing(const void* addr) const;
1324
1325 // Returns the HeapRegion that contains addr. addr must not be NULL.
1326 template <class T>
1327 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1328
1329 // Returns the HeapRegion that contains addr. addr must not be NULL.
1330 // If addr is within a "continues humongous" region, it returns its humongous start region.
1331 template <class T>
1332 inline HeapRegion* heap_region_containing(const T addr) const;
1333
1334 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1335 // each address in the (reserved) heap is a member of exactly
1336 // one block. The defining characteristic of a block is that it is
1337 // possible to find its size, and thus to progress forward to the next
1338 // block. (Blocks may be of different sizes.) Thus, blocks may
1339 // represent Java objects, or they might be free blocks in a
1340 // free-list-based heap (or subheap), as long as the two kinds are
1341 // distinguishable and the size of each is determinable.
1342
1343 // Returns the address of the start of the "block" that contains the
|
59 class G1ParScanThreadState;
60 class ObjectClosure;
61 class SpaceClosure;
62 class CompactibleSpaceClosure;
63 class Space;
64 class G1CollectorPolicy;
65 class GenRemSet;
66 class G1RemSet;
67 class HeapRegionRemSetIterator;
68 class ConcurrentMark;
69 class ConcurrentMarkThread;
70 class ConcurrentG1Refine;
71 class ConcurrentGCTimer;
72 class GenerationCounters;
73 class STWGCTimer;
74 class G1NewTracer;
75 class G1OldTracer;
76 class EvacuationFailedInfo;
77 class nmethod;
78 class Ticks;
79 class FlexibleWorkGang;
80
81 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
82 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
83
84 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
85 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
86
87 class YoungList : public CHeapObj<mtGC> {
88 private:
89 G1CollectedHeap* _g1h;
90
91 HeapRegion* _head;
92
93 HeapRegion* _survivor_head;
94 HeapRegion* _survivor_tail;
95
96 HeapRegion* _curr;
97
98 uint _length;
99 uint _survivor_length;
188 friend class SurvivorGCAllocRegion;
189 friend class OldGCAllocRegion;
190 friend class G1Allocator;
191
192 // Closures used in implementation.
193 friend class G1ParScanThreadState;
194 friend class G1ParTask;
195 friend class G1ParGCAllocator;
196 friend class G1PrepareCompactClosure;
197
198 // Other related classes.
199 friend class HeapRegionClaimer;
200
201 // Testing classes.
202 friend class G1CheckCSetFastTableClosure;
203
204 private:
205 // The one and only G1CollectedHeap, so static functions can find it.
206 static G1CollectedHeap* _g1h;
207
208 FlexibleWorkGang* _workers;
209
210 static size_t _humongous_object_threshold_in_words;
211
212 // The secondary free list which contains regions that have been
213 // freed up during the cleanup process. This will be appended to
214 // the master free list when appropriate.
215 FreeRegionList _secondary_free_list;
216
217 // It keeps track of the old regions.
218 HeapRegionSet _old_set;
219
220 // It keeps track of the humongous regions.
221 HeapRegionSet _humongous_set;
222
223 void clear_humongous_is_live_table();
224 void eagerly_reclaim_humongous_regions();
225
226 // The number of regions we could create by expansion.
227 uint _expansion_regions;
228
229 // The block offset table for the G1 heap.
591 // failed allocation request (including collection, expansion, etc.)
592 HeapWord* satisfy_failed_allocation(size_t word_size,
593 AllocationContext_t context,
594 bool* succeeded);
595
596 // Attempts to expand the heap sufficiently
597 // to support an allocation of the given "word_size". If
598 // successful, perform the allocation and return the address of the
599 // allocated block, or else "NULL".
600 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
601
602 // Process any reference objects discovered during
603 // an incremental evacuation pause.
604 void process_discovered_references(uint no_of_gc_workers);
605
606 // Enqueue any remaining discovered references
607 // after processing.
608 void enqueue_discovered_references(uint no_of_gc_workers);
609
610 public:
// Accessor for the GC worker thread gang (_workers).
611 FlexibleWorkGang* workers() const { return _workers; }
612
// Accessor for the heap's G1Allocator (_allocator is set up elsewhere in this class).
613 G1Allocator* allocator() {
614 return _allocator;
615 }
616
// Accessor for the G1 monitoring support object.
// Asserts that _g1mm has been initialized before it is handed out.
617 G1MonitoringSupport* g1mm() {
618 assert(_g1mm != NULL, "should have been initialized");
619 return _g1mm;
620 }
621
622 // Expand the garbage-first heap by at least the given size (in bytes!).
623 // Returns true if the heap was expanded by the requested amount;
624 // false otherwise.
625 // (Rounds up to a HeapRegion boundary.)
626 bool expand(size_t expand_bytes);
627
628 // Returns the PLAB statistics for a given destination.
629 inline PLABStats* alloc_buffer_stats(InCSetState dest);
630
631 // Determines PLAB size for a given destination.
632 inline size_t desired_plab_sz(InCSetState dest);
633
634 inline AllocationContextStats& allocation_context_stats();
635
636 // Do anything common to GC's.
637 void gc_prologue(bool full);
638 void gc_epilogue(bool full);
639
640 inline void set_humongous_is_live(oop obj);
641
// Queries the _humongous_is_live table for the given region index.
642 bool humongous_is_live(uint region) {
643 return _humongous_is_live.is_live(region);
644 }
645
646 // Returns whether the given region (which must be a humongous (start) region)
647 // is to be considered conservatively live regardless of any other conditions.
648 bool humongous_region_is_always_live(uint index);
649 // Returns whether the given region (which must be a humongous (start) region)
650 // is considered a candidate for eager reclamation.
651 bool humongous_region_is_candidate(uint index);
652 // Register the given region to be part of the collection set.
653 inline void register_humongous_region_with_cset(uint index);
654 // Register regions with humongous objects (actually on the start region) in
655 // the in_cset_fast_test table.
656 void register_humongous_regions_with_cset();
657 // We register a region with the fast "in collection set" test. We
658 // simply set to true the array slot corresponding to this region.
987 // update the remembered sets of the regions in the collection
988 // set in the event of an evacuation failure.
// Accessor for _into_cset_dirty_card_queue_set; see the preceding comment for its role.
989 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
990 { return _into_cset_dirty_card_queue_set; }
991
992 // Create a G1CollectedHeap with the specified policy.
993 // Must call the initialize method afterwards.
994 // May not return if something goes wrong.
995 G1CollectedHeap(G1CollectorPolicy* policy);
996
997 // Initialize the G1CollectedHeap to have the initial and
998 // maximum sizes and remembered and barrier sets
999 // specified by the policy object.
1000 jint initialize();
1001
1002 virtual void stop();
1003
1004 // Return the (conservative) maximum heap alignment for any G1 heap
1005 static size_t conservative_max_heap_alignment();
1006
1007 // Does operations required after initialization has been done.
1008 void post_initialize();
1009
1010 // Initialize weak reference processing.
1011 virtual void ref_processing_init();
1012
1013 // Explicitly import set_par_threads into this scope
1014 using SharedHeap::set_par_threads;
1015 // Set _n_par_threads according to a policy TBD.
1016 void set_par_threads();
1017
// Reports this heap's kind so generic CollectedHeap clients can identify G1.
1018 virtual CollectedHeap::Name kind() const {
1019 return CollectedHeap::G1CollectedHeap;
1020 }
1021
1022 // The current policy object for the collector.
// Accessor for the _g1_policy field.
1023 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1024
// NOTE(review): the C-style cast to CollectorPolicy* behaves like an implicit
// upcast only if G1CollectorPolicy's definition is visible here; with just the
// forward declaration it silently degrades to a reinterpret_cast. Prefer an
// implicit conversion or static_cast once the required include is confirmed.
1025 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
1026
1027 // Adaptive size policy. No such thing for g1.
// G1 reports no adaptive size policy; generic callers must handle NULL.
1028 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1029
1104 #ifndef PRODUCT
1105 // Make sure that the given bitmap has no marked objects in the
1106 // range [from,limit). If it does, print an error message and return
1107 // false. Otherwise, just return true. bitmap_name should be "prev"
1108 // or "next".
1109 bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1110 HeapWord* from, HeapWord* limit);
1111
1112 // Verify that the prev / next bitmap range [tams,end) for the given
1113 // region has no marks. Return true if all is well, false if errors
1114 // are detected.
1115 bool verify_bitmaps(const char* caller, HeapRegion* hr);
1116
1117 // True if the heap_lock is held by a non-gc thread invoking a gc
1118 // operation.
1119 bool _thread_holds_heap_lock_for_gc;
1120 // Returns true if the calling thread holds the heap lock,
1121 // or the calling thread is a par gc thread and the heap_lock is held
1122 // by the vm thread doing a gc operation.
1123 bool heap_lock_held_for_gc();
1124 #endif // PRODUCT
1125
// Non-product builds: records whether the heap lock is held for a GC (the
// backing field above exists only under #ifndef PRODUCT). In product builds
// PRODUCT_RETURN expands this to an empty inline body, so call sites need no
// #ifndef guards — presumably the intended fix for the guard issue in the
// earlier revision; confirm against the PRODUCT_RETURN macro definition.
1126 void set_heap_lock_held_for_gc(bool value) PRODUCT_RETURN;
1127
1128 // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1129 // the given region do not have any spurious marks. If errors are
1130 // detected, print appropriate error messages and crash.
1131 void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1132
1133 // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1134 // have any spurious marks. If errors are detected, print
1135 // appropriate error messages and crash.
1136 void check_bitmaps(const char* caller) PRODUCT_RETURN;
1137
1138 // Do sanity check on the contents of the in-cset fast test table.
1139 bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
1140
1141 // verify_region_sets() performs verification over the region
1142 // lists. It will be compiled in the product code to be used when
1143 // necessary (i.e., during heap verification).
1144 void verify_region_sets();
1145
1146 // verify_region_sets_optional() is planted in the code for
1147 // list verification in non-product builds (and it can be enabled in
1262 return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1263 }
1264
1265 // This resets the card table to all zeros. It is used after
1266 // a collection pause which used the card table to claim cards.
1267 void cleanUpCardTable();
1268
1269 // Iteration functions.
1270
1271 // Iterate over all the ref-containing fields of all objects, calling
1272 // "cl.do_oop" on each.
1273 virtual void oop_iterate(ExtendedOopClosure* cl);
1274
1275 // Iterate over all objects, calling "cl.do_object" on each.
1276 virtual void object_iterate(ObjectClosure* cl);
1277
// The "safe" variant simply forwards to object_iterate(); no extra filtering
// is applied here.
1278 virtual void safe_object_iterate(ObjectClosure* cl) {
1279 object_iterate(cl);
1280 }
1281
1282 // Iterate over heap regions, in address order, terminating the
1283 // iteration early if the "doHeapRegion" method returns "true".
1284 void heap_region_iterate(HeapRegionClosure* blk) const;
1285
1286 // Return the region with the given index. It assumes the index is valid.
1287 inline HeapRegion* region_at(uint index) const;
1288
1289 // Calculate the region index of the given address. Given address must be
1290 // within the heap.
1291 inline uint addr_to_region(HeapWord* addr) const;
1292
1293 inline HeapWord* bottom_addr_for_region(uint index) const;
1294
1295 // Iterate over the heap regions in parallel. Assumes that this will be called
1296 // in parallel by ParallelGCThreads worker threads with distinct worker ids
1297 // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1298 // to each of the regions, by attempting to claim the region using the
1299 // HeapRegionClaimer and, if successful, applying the closure to the claimed
1300 // region. The concurrent argument should be set to true if iteration is
1301 // performed concurrently, during which no assumptions are made for consistent
1306 bool concurrent = false) const;
1307
1308 // Clear the cached cset start regions and (more importantly)
1309 // the time stamps. Called when we reset the GC time stamp.
1310 void clear_cset_start_regions();
1311
1312 // Given the id of a worker, obtain or calculate a suitable
1313 // starting region for iterating over the current collection set.
1314 HeapRegion* start_cset_region_for_worker(uint worker_i);
1315
1316 // Iterate over the regions (if any) in the current collection set.
1317 void collection_set_iterate(HeapRegionClosure* blk);
1318
1319 // As above but starting from region r
1320 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1321
1322 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1323
1324 // A CollectedHeap will contain some number of spaces. This finds the
1325 // space containing a given address, or else returns NULL.
1326 Space* space_containing(const void* addr) const;
1327
1328 // Returns the HeapRegion that contains addr. addr must not be NULL.
1329 template <class T>
1330 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1331
1332 // Returns the HeapRegion that contains addr. addr must not be NULL.
1333 // If addr is within a "continues humongous" region, it returns its humongous start region.
1334 template <class T>
1335 inline HeapRegion* heap_region_containing(const T addr) const;
1336
1337 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1338 // each address in the (reserved) heap is a member of exactly
1339 // one block. The defining characteristic of a block is that it is
1340 // possible to find its size, and thus to progress forward to the next
1341 // block. (Blocks may be of different sizes.) Thus, blocks may
1342 // represent Java objects, or they might be free blocks in a
1343 // free-list-based heap (or subheap), as long as the two kinds are
1344 // distinguishable and the size of each is determinable.
1345
1346 // Returns the address of the start of the "block" that contains the
|