57 // categories. In particular, support for concurrent iteration by the garbage
58 // collector, under certain restrictions, is required. Further, it must not
59 // block nor be blocked by other operations for long periods.
60 //
61 // Internally, OopStorage is a set of Block objects, from which entries are
62 // allocated and released. A block contains an oop[] and a bitmask indicating
63 // which entries are in use (have been allocated and not yet released). New
64 // blocks are constructed and added to the storage object when an entry
65 // allocation request is made and there are no blocks with unused entries.
66 // Blocks may be removed and deleted when empty.
67 //
68 // There are two important (and somewhat intertwined) protocols governing
69 // concurrent access to a storage object. These are the Concurrent Iteration
70 // Protocol and the Allocation Protocol. See the ParState class for a
71 // discussion of concurrent iteration and the management of thread
72 // interactions for this protocol. Similarly, see the allocate() function for
73 // a discussion of allocation.
74
75 class OopStorage : public CHeapObj<mtGC> {
76 public:
77 OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex);
78 ~OopStorage();
79
80 // These count and usage accessors are racy unless at a safepoint.
81
82 // The number of allocated and not yet released entries.
83 size_t allocation_count() const;
84
85 // The number of blocks of entries. Useful for sizing parallel iteration.
86 size_t block_count() const;
87
88 // Total number of blocks * memory allocation per block, plus
89 // bookkeeping overhead, including this storage object.
90 size_t total_memory_usage() const;
91
92 enum EntryStatus {
93 INVALID_ENTRY,
94 UNALLOCATED_ENTRY,
95 ALLOCATED_ENTRY
96 };
97
134 // - closure->do_oop(p) must be a valid expression whose value is ignored.
135 //
136 // - is_alive->do_object_b(*p) must be a valid expression whose value is
137 // convertible to bool.
138 //
139 // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
140 // invoked for p. If is_alive->do_object_b(*p) is false, then closure will
141 // not be invoked on p, and *p will be set to NULL.
142
143 template<typename Closure> inline void oops_do(Closure* closure);
144 template<typename Closure> inline void oops_do(Closure* closure) const;
145 template<typename Closure> inline void weak_oops_do(Closure* closure);
146
147 template<typename IsAliveClosure, typename Closure>
148 inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
149
150 // Parallel iteration is for the exclusive use of the GC.
151 // Other clients must use serial iteration.
152 template<bool concurrent, bool is_const> class ParState;
153
154 // Service thread cleanup support.
155
156 // Called by the service thread to process any pending cleanups for this
157 // storage object. Drains the _deferred_updates list, and deletes empty
158 // blocks. Stops deleting if there is an in-progress concurrent
159 // iteration. Locks both the _allocation_mutex and the _active_mutex, and
160 // may safepoint. Deletion may be throttled, with only some available
161 // work performed, in order to allow other Service thread subtasks to run.
162 // Returns true if there may be more work to do, false if nothing to do.
163 bool delete_empty_blocks();
164
165 // Called by safepoint cleanup to notify the service thread (via
166 // Service_lock) that there may be some OopStorage objects with pending
167 // cleanups to process.
168 static void trigger_cleanup_if_needed();
169
170 // Called by the service thread (while holding Service_lock) to test for
171 // pending cleanup requests, and to reset the request state to allow
172 // recognition of new requests. Returns true if there was a pending
173 // request.
217
218 private:
219 const char* _name;
220 ActiveArray* _active_array;
221 AllocationList _allocation_list;
222 Block* volatile _deferred_updates;
223 Mutex* _allocation_mutex;
224 Mutex* _active_mutex;
225
226 // Volatile for racy unlocked accesses.
227 volatile size_t _allocation_count;
228
229 // Protection for _active_array.
230 mutable SingleWriterSynchronizer _protect_active;
231
232 // mutable because this gets set even for const iteration.
233 mutable int _concurrent_iteration_count;
234
235 volatile bool _needs_cleanup;
236
237 bool try_add_block();
238 Block* block_for_allocation();
239
240 Block* find_block_or_null(const oop* ptr) const;
241 void delete_empty_block(const Block& block);
242 bool reduce_deferred_updates();
243 void record_needs_cleanup();
244
245 // Managing _active_array.
246 bool expand_active_array();
247 void replace_active_array(ActiveArray* new_array);
248 ActiveArray* obtain_active_array() const;
249 void relinquish_block_array(ActiveArray* array) const;
250 class WithActiveArray; // RAII helper for active array access.
251
252 template<typename F, typename Storage>
253 static bool iterate_impl(F f, Storage* storage);
254
255 // Implementation support for parallel iteration
256 class BasicParState;
|
57 // categories. In particular, support for concurrent iteration by the garbage
58 // collector, under certain restrictions, is required. Further, it must not
59 // block nor be blocked by other operations for long periods.
60 //
61 // Internally, OopStorage is a set of Block objects, from which entries are
62 // allocated and released. A block contains an oop[] and a bitmask indicating
63 // which entries are in use (have been allocated and not yet released). New
64 // blocks are constructed and added to the storage object when an entry
65 // allocation request is made and there are no blocks with unused entries.
66 // Blocks may be removed and deleted when empty.
67 //
68 // There are two important (and somewhat intertwined) protocols governing
69 // concurrent access to a storage object. These are the Concurrent Iteration
70 // Protocol and the Allocation Protocol. See the ParState class for a
71 // discussion of concurrent iteration and the management of thread
72 // interactions for this protocol. Similarly, see the allocate() function for
73 // a discussion of allocation.
74
75 class OopStorage : public CHeapObj<mtGC> {
76 public:
77 // GC notification support.
78 typedef void (*NotificationFunction)(size_t dead_count);
79
80 OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex, NotificationFunction f = NULL);
81 ~OopStorage();
82
83 // These count and usage accessors are racy unless at a safepoint.
84
85 // The number of allocated and not yet released entries.
86 size_t allocation_count() const;
87
88 // The number of blocks of entries. Useful for sizing parallel iteration.
89 size_t block_count() const;
90
91 // Total number of blocks * memory allocation per block, plus
92 // bookkeeping overhead, including this storage object.
93 size_t total_memory_usage() const;
94
95 enum EntryStatus {
96 INVALID_ENTRY,
97 UNALLOCATED_ENTRY,
98 ALLOCATED_ENTRY
99 };
100
137 // - closure->do_oop(p) must be a valid expression whose value is ignored.
138 //
139 // - is_alive->do_object_b(*p) must be a valid expression whose value is
140 // convertible to bool.
141 //
142 // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
143 // invoked for p. If is_alive->do_object_b(*p) is false, then closure will
144 // not be invoked on p, and *p will be set to NULL.
145
146 template<typename Closure> inline void oops_do(Closure* closure);
147 template<typename Closure> inline void oops_do(Closure* closure) const;
148 template<typename Closure> inline void weak_oops_do(Closure* closure);
149
150 template<typename IsAliveClosure, typename Closure>
151 inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
152
153 // Parallel iteration is for the exclusive use of the GC.
154 // Other clients must use serial iteration.
155 template<bool concurrent, bool is_const> class ParState;
156
157 // Called by the GC to invoke the registered notification function, passing the number of dead entries.
158 void notify(size_t num_dead) const;
159
160 // Returns true if this OopStorage has a notification function registered for the GC to invoke.
161 bool can_notify() const;
162
163 // Service thread cleanup support.
164
165 // Called by the service thread to process any pending cleanups for this
166 // storage object. Drains the _deferred_updates list, and deletes empty
167 // blocks. Stops deleting if there is an in-progress concurrent
168 // iteration. Locks both the _allocation_mutex and the _active_mutex, and
169 // may safepoint. Deletion may be throttled, with only some available
170 // work performed, in order to allow other Service thread subtasks to run.
171 // Returns true if there may be more work to do, false if nothing to do.
172 bool delete_empty_blocks();
173
174 // Called by safepoint cleanup to notify the service thread (via
175 // Service_lock) that there may be some OopStorage objects with pending
176 // cleanups to process.
177 static void trigger_cleanup_if_needed();
178
179 // Called by the service thread (while holding Service_lock) to test for
180 // pending cleanup requests, and to reset the request state to allow
181 // recognition of new requests. Returns true if there was a pending
182 // request.
226
227 private:
228 const char* _name;
229 ActiveArray* _active_array;
230 AllocationList _allocation_list;
231 Block* volatile _deferred_updates;
232 Mutex* _allocation_mutex;
233 Mutex* _active_mutex;
234
235 // Volatile for racy unlocked accesses.
236 volatile size_t _allocation_count;
237
238 // Protection for _active_array.
239 mutable SingleWriterSynchronizer _protect_active;
240
241 // mutable because this gets set even for const iteration.
242 mutable int _concurrent_iteration_count;
243
244 volatile bool _needs_cleanup;
245
246 NotificationFunction _notification_function;
247
248 bool try_add_block();
249 Block* block_for_allocation();
250
251 Block* find_block_or_null(const oop* ptr) const;
252 void delete_empty_block(const Block& block);
253 bool reduce_deferred_updates();
254 void record_needs_cleanup();
255
256 // Managing _active_array.
257 bool expand_active_array();
258 void replace_active_array(ActiveArray* new_array);
259 ActiveArray* obtain_active_array() const;
260 void relinquish_block_array(ActiveArray* array) const;
261 class WithActiveArray; // RAII helper for active array access.
262
263 template<typename F, typename Storage>
264 static bool iterate_impl(F f, Storage* storage);
265
266 // Implementation support for parallel iteration
267 class BasicParState;
|