1161 // Perform a collection of the heap; intended for use in implementing
1162 // "System.gc". This probably implies as full a collection as the
1163 // "CollectedHeap" supports.
1164 virtual void collect(GCCause::Cause cause);
1165
1166 // The same as above but assume that the caller holds the Heap_lock.
1167 void collect_locked(GCCause::Cause cause);
1168
// Copy per-allocation-context statistics into the caller-supplied arrays
// (one entry per id in "contexts", "len" entries each). NOTE(review): the
// meaning of the returned bool and the "accuracy" flags is not visible from
// this header — confirm against the definition.
1169 virtual bool copy_allocation_context_stats(const jint* contexts,
1170 jlong* totals,
1171 jbyte* accuracy,
1172 jint len);
1173
1174 // True iff an evacuation has failed in the most-recent collection.
1175 bool evacuation_failed() { return _evacuation_failed; }
1176
// Bookkeeping helpers used when freeing regions: shrink the old/humongous
// region-set counts, return a list of regions to the free list, and reduce
// the running byte total (presumably used bytes — confirm at the definition).
1177 void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1178 void prepend_to_freelist(FreeRegionList* list);
1179 void decrement_summary_bytes(size_t bytes);
1180
1181 // Returns "TRUE" iff "p" points into the committed areas of the heap.
1182 virtual bool is_in(const void* p) const;
1183 #ifdef ASSERT
1184 // Returns whether p is in one of the available areas of the heap. Slow but
1185 // extensive version.
1186 bool is_in_exact(const void* p) const;
1187 #endif
1188
1189 // Return "TRUE" iff the given object address is within the collection
1190 // set. Slow implementation.
1191 bool obj_in_cs(oop obj);
1192
// Collection-set membership tests; inline counterparts to obj_in_cs()
// above (definitions live in the corresponding .inline.hpp file).
1193 inline bool is_in_cset(const HeapRegion *hr);
1194 inline bool is_in_cset(oop obj);
1195
// As above, but apparently also true for humongous objects — NOTE(review):
// exact semantics are in the inline definition; confirm there.
1196 inline bool is_in_cset_or_humongous(const oop obj);
1197
1198 private:
1199 // This array is used for a quick test on whether a reference points into
1200 // the collection set or not. Each of the array's elements denotes whether the
1201 // corresponding region is in the collection set or not.
1226 // This resets the card table to all zeros. It is used after
1227 // a collection pause which used the card table to claim cards.
1228 void cleanUpCardTable();
1229
1230 // Iteration functions.
1231
1232 // Iterate over all objects, calling "cl.do_object" on each.
1233 virtual void object_iterate(ObjectClosure* cl);
1234
// Same as object_iterate(); this heap offers no weaker "safe" variant,
// so the override simply delegates.
1235 virtual void safe_object_iterate(ObjectClosure* cl) {
1236 object_iterate(cl);
1237 }
1238
1239 // Iterate over heap regions, in address order, terminating the
1240 // iteration early if the "doHeapRegion" method returns "true".
1241 void heap_region_iterate(HeapRegionClosure* blk) const;
1242
1243 // Return the region with the given index. It assumes the index is valid.
1244 inline HeapRegion* region_at(uint index) const;
1245
1246 // Calculate the region index of the given address. Given address must be
1247 // within the heap.
1248 inline uint addr_to_region(HeapWord* addr) const;
1249
// Bottom (first) heap address of the region with the given index.
1250 inline HeapWord* bottom_addr_for_region(uint index) const;
1251
1252 // Iterate over the heap regions in parallel. Assumes that this will be called
1253 // in parallel by ParallelGCThreads worker threads with distinct worker ids
1254 // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1255 // to each of the regions, by attempting to claim the region using the
1256 // HeapRegionClaimer and, if successful, applying the closure to the claimed
1257 // region. The concurrent argument should be set to true if iteration is
1258 // performed concurrently, during which no assumptions are made for consistent
1259 // attributes of the heap regions (as they might be modified while iterating).
1260 void heap_region_par_iterate(HeapRegionClosure* cl,
1261 uint worker_id,
1262 HeapRegionClaimer* hrclaimer,
1263 bool concurrent = false) const;
1264
1265 // Clear the cached cset start regions and (more importantly)
1266 // the time stamps. Called when we reset the GC time stamp.
1267 void clear_cset_start_regions();
1268
1269 // Given the id of a worker, obtain or calculate a suitable
1270 // starting region for iterating over the current collection set.
1271 HeapRegion* start_cset_region_for_worker(uint worker_i);
1272
1273 // Iterate over the regions (if any) in the current collection set.
1274 void collection_set_iterate(HeapRegionClosure* blk);
1275
1276 // As above, but starting the iteration from region "r".
1277 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1278
// Next region to visit after "from" during a full-heap compaction —
// NOTE(review): the traversal order is defined at the definition; confirm there.
1279 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1280
1281 // Returns the HeapRegion that contains addr. addr must not be NULL.
// NOTE(review): unlike heap_region_containing() below, this "raw" variant
// apparently does not remap an address inside a humongous "continues" region
// to its humongous start region — confirm against the inline definition.
1282 template <class T>
1283 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1284
1285 // Returns the HeapRegion that contains addr. addr must not be NULL.
1286 // If addr is within a humongous continues region, it returns its humongous start region.
1287 template <class T>
1288 inline HeapRegion* heap_region_containing(const T addr) const;
1289
1290 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1291 // each address in the (reserved) heap is a member of exactly
1292 // one block. The defining characteristic of a block is that it is
1293 // possible to find its size, and thus to progress forward to the next
1294 // block. (Blocks may be of different sizes.) Thus, blocks may
1295 // represent Java objects, or they might be free blocks in a
1296 // free-list-based heap (or subheap), as long as the two kinds are
1297 // distinguishable and the size of each is determinable.
1298
1299 // Returns the address of the start of the "block" that contains the
1300 // address "addr". We say "blocks" instead of "objects" since some heaps
1301 // may not pack objects densely; a chunk may either be an object or a
1302 // non-object.
1303 virtual HeapWord* block_start(const void* addr) const;
1304
1305 // Requires "addr" to be the start of a chunk, and returns its size.
1306 // "addr + size" is required to be the start of a new chunk, or the end
|
1161 // Perform a collection of the heap; intended for use in implementing
1162 // "System.gc". This probably implies as full a collection as the
1163 // "CollectedHeap" supports.
1164 virtual void collect(GCCause::Cause cause);
1165
1166 // The same as above but assume that the caller holds the Heap_lock.
1167 void collect_locked(GCCause::Cause cause);
1168
// Copy per-allocation-context statistics into the caller-supplied arrays
// (one entry per id in "contexts", "len" entries each). NOTE(review): the
// meaning of the returned bool and the "accuracy" flags is not visible from
// this header — confirm against the definition.
1169 virtual bool copy_allocation_context_stats(const jint* contexts,
1170 jlong* totals,
1171 jbyte* accuracy,
1172 jint len);
1173
1174 // True iff an evacuation has failed in the most-recent collection.
1175 bool evacuation_failed() { return _evacuation_failed; }
1176
// Bookkeeping helpers used when freeing regions: shrink the old/humongous
// region-set counts, return a list of regions to the free list, and reduce
// the running byte total (presumably used bytes — confirm at the definition).
1177 void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1178 void prepend_to_freelist(FreeRegionList* list);
1179 void decrement_summary_bytes(size_t bytes);
1180
// Returns "TRUE" iff "p" points into the committed areas of the heap.
1181 virtual bool is_in(const void* p) const;
1182 #ifdef ASSERT
1183 // Returns whether p is in one of the available areas of the heap. Slow but
1184 // extensive version.
1185 bool is_in_exact(const void* p) const;
1186 #endif
1187
1188 // Return "TRUE" iff the given object address is within the collection
1189 // set. Slow implementation.
1190 bool obj_in_cs(oop obj);
1191
// Collection-set membership tests; inline counterparts to obj_in_cs()
// above (definitions live in the corresponding .inline.hpp file).
1192 inline bool is_in_cset(const HeapRegion *hr);
1193 inline bool is_in_cset(oop obj);
1194
// As above, but apparently also true for humongous objects — NOTE(review):
// exact semantics are in the inline definition; confirm there.
1195 inline bool is_in_cset_or_humongous(const oop obj);
1196
1197 private:
1198 // This array is used for a quick test on whether a reference points into
1199 // the collection set or not. Each of the array's elements denotes whether the
1200 // corresponding region is in the collection set or not.
1225 // This resets the card table to all zeros. It is used after
1226 // a collection pause which used the card table to claim cards.
1227 void cleanUpCardTable();
1228
1229 // Iteration functions.
1230
1231 // Iterate over all objects, calling "cl.do_object" on each.
1232 virtual void object_iterate(ObjectClosure* cl);
1233
// Same as object_iterate(); this heap offers no weaker "safe" variant,
// so the override simply delegates.
1234 virtual void safe_object_iterate(ObjectClosure* cl) {
1235 object_iterate(cl);
1236 }
1237
1238 // Iterate over heap regions, in address order, terminating the
1239 // iteration early if the "doHeapRegion" method returns "true".
1240 void heap_region_iterate(HeapRegionClosure* blk) const;
1241
1242 // Return the region with the given index. It assumes the index is valid.
1243 inline HeapRegion* region_at(uint index) const;
1244
1245 // Return the next region (by index) that is part of the same
1246 // humongous object that hr is part of.
1247 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1248
1249 // Calculate the region index of the given address. Given address must be
1250 // within the heap.
1251 inline uint addr_to_region(HeapWord* addr) const;
1252
// Bottom (first) heap address of the region with the given index.
1253 inline HeapWord* bottom_addr_for_region(uint index) const;
1254
1255 // Iterate over the heap regions in parallel. Assumes that this will be called
1256 // in parallel by ParallelGCThreads worker threads with distinct worker ids
1257 // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1258 // to each of the regions, by attempting to claim the region using the
1259 // HeapRegionClaimer and, if successful, applying the closure to the claimed
1260 // region. The concurrent argument should be set to true if iteration is
1261 // performed concurrently, during which no assumptions are made for consistent
1262 // attributes of the heap regions (as they might be modified while iterating).
1263 void heap_region_par_iterate(HeapRegionClosure* cl,
1264 uint worker_id,
1265 HeapRegionClaimer* hrclaimer,
1266 bool concurrent = false) const;
1267
1268 // Clear the cached cset start regions and (more importantly)
1269 // the time stamps. Called when we reset the GC time stamp.
1270 void clear_cset_start_regions();
1271
1272 // Given the id of a worker, obtain or calculate a suitable
1273 // starting region for iterating over the current collection set.
1274 HeapRegion* start_cset_region_for_worker(uint worker_i);
1275
1276 // Iterate over the regions (if any) in the current collection set.
1277 void collection_set_iterate(HeapRegionClosure* blk);
1278
1279 // As above, but starting the iteration from region "r".
1280 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1281
// Next region to visit after "from" during a full-heap compaction —
// NOTE(review): the traversal order is defined at the definition; confirm there.
1282 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1283
1284 // Returns the HeapRegion that contains addr. addr must not be NULL.
// NOTE(review): whether an address inside a humongous "continues" region is
// remapped to its humongous start region is determined by the inline
// definition — confirm there before relying on either behavior.
1285 template <class T>
1286 inline HeapRegion* heap_region_containing(const T addr) const;
1287
1288 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1289 // each address in the (reserved) heap is a member of exactly
1290 // one block. The defining characteristic of a block is that it is
1291 // possible to find its size, and thus to progress forward to the next
1292 // block. (Blocks may be of different sizes.) Thus, blocks may
1293 // represent Java objects, or they might be free blocks in a
1294 // free-list-based heap (or subheap), as long as the two kinds are
1295 // distinguishable and the size of each is determinable.
1296
1297 // Returns the address of the start of the "block" that contains the
1298 // address "addr". We say "blocks" instead of "objects" since some heaps
1299 // may not pack objects densely; a chunk may either be an object or a
1300 // non-object.
1301 virtual HeapWord* block_start(const void* addr) const;
1302
1303 // Requires "addr" to be the start of a chunk, and returns its size.
1304 // "addr + size" is required to be the start of a new chunk, or the end
|