1191 }
1192
// Declaration only; defined out of line. NOTE(review): semantics not visible
// here — presumably tests membership in the heap's "closed subset" of
// addresses; confirm against the out-of-line definition before relying on it.
1193 virtual bool is_in_closed_subset(const void* p) const;
1194
1195 G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1196 return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1197 }
1198
1199 G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
1200
1201 // Iteration functions.
1202
1203 // Iterate over all objects, calling "cl.do_object" on each.
// Declaration only; the traversal itself is implemented out of line.
1204 virtual void object_iterate(ObjectClosure* cl);
1205
1206 virtual void safe_object_iterate(ObjectClosure* cl) {
1207 object_iterate(cl);
1208 }
1209
1210 // Iterate over heap regions, in address order, terminating the
1211 // iteration early if the "do_heap_region" method returns "true".
1212 void heap_region_iterate(HeapRegionClosure* blk) const;
1213
1214 // Return the region with the given index. It assumes the index is valid.
1215 inline HeapRegion* region_at(uint index) const;
1216
1217 // Return the next region (by index) that is part of the same
1218 // humongous object that hr is part of.
1219 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1220
1221 // Calculate the region index of the given address. Given address must be
1222 // within the heap.
1223 inline uint addr_to_region(HeapWord* addr) const;
1224
// NOTE(review): undocumented in original — presumably returns the lowest
// (bottom) HeapWord* of the region with the given index, i.e. the inverse
// mapping of addr_to_region; confirm against the inline definition.
1225 inline HeapWord* bottom_addr_for_region(uint index) const;
1226
1227 // Two functions to iterate over the heap regions in parallel. Threads
1228 // compete using the HeapRegionClaimer to claim the regions before
1229 // applying the closure on them.
1230 // The _from_worker_offset version uses the HeapRegionClaimer and
1231 // the worker id to calculate a start offset to prevent all workers to
|
1191 }
1192
1193 virtual bool is_in_closed_subset(const void* p) const;
1194
// Typed accessor: downcasts the heap's installed BarrierSet to G1's SATB
// card-table logging variant via barrier_set_cast.
1195 G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1196 return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1197 }
1198
1199 G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
1200
1201 // Iteration functions.
1202
1203 // Iterate over all objects, calling "cl.do_object" on each.
1204 virtual void object_iterate(ObjectClosure* cl);
1205
// "Safe" object iteration: for G1 this simply forwards to object_iterate().
1206 virtual void safe_object_iterate(ObjectClosure* cl) {
1207 object_iterate(cl);
1208 }
1209
1210 // Iterate over heap regions, in address order, terminating the
1211 // iteration early if the "do_heap_region" method returns "true".
1212 void heap_region_iterate(HeapRegionClosure* blk) const;
1213
1214 // Return the region with the given index. It assumes the index is valid.
1215 inline HeapRegion* region_at(uint index) const;
1216
1217 // Return the next region (by index) that is part of the same
1218 // humongous object that hr is part of.
1219 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1220
1221 // Calculate the region index of the given address. Given address must be
1222 // within the heap.
1223 inline uint addr_to_region(HeapWord* addr) const;
1224
1225 inline HeapWord* bottom_addr_for_region(uint index) const;
1226
1227 // Two functions to iterate over the heap regions in parallel. Threads
1228 // compete using the HeapRegionClaimer to claim the regions before
1229 // applying the closure on them.
1230 // The _from_worker_offset version uses the HeapRegionClaimer and
1231 // the worker id to calculate a start offset to prevent all workers to
|