153 static void reset_recycled_stack_index() {
154 _recycled_bottom = _recycled_top = -1;
155 }
156
157 ParCompactionManager();
158 ~ParCompactionManager();
159
160 // Pushes region_index onto the region stack selected by stack_index.
161 // If that region stack is full,
162 // pushes onto the region overflow stack.
163 static void region_list_push(uint stack_index, size_t region_index);
// Asserts (in debug builds, presumably) that the region list at
// stack_index has been fully drained -- confirm in the .cpp.
164 static void verify_region_list_empty(uint stack_index);
// Accessor for the shared parallel marking bitmap.
165 ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
166
167 // void drain_stacks();
168
// NOTE(review): predicates presumably consulted to choose the per-object
// action during compaction -- confirm at the call sites.
169 bool should_update();
170 bool should_copy();
171
172 // Save for later processing. Must not fail.
173 inline void push(oop obj) { _marking_stack.push(obj); }
174 inline void push_objarray(oop objarray, size_t index);
175 inline void push_region(size_t index);
179
// Work-stealing entry points. Each delegates to the corresponding
// task-queue set, attempting to steal one task on behalf of queue
// queue_num using the caller-supplied random seed; returns true and
// fills the out-parameter on success.
180 static bool steal(int queue_num, int* seed, oop& t) {
181 return stack_array()->steal(queue_num, seed, t);
182 }
183
// Steals a partially-scanned object-array chunk task.
184 static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
185 return _objarray_queues->steal(queue_num, seed, t);
186 }
187
// Steals a compaction-region index from the region task-queue set.
188 static bool steal(int queue_num, int* seed, size_t& region) {
189 return region_array()->steal(queue_num, seed, region);
190 }
191
192 // Process tasks remaining on any marking stack
193 void follow_marking_stacks();
// True when both marking stacks are drained; defined inline below.
194 inline bool marking_stacks_empty() const;
195
196 // Process tasks remaining on any stack
197 void drain_region_stacks();
198
// Trace through obj (or, for the second overload, the slice of an
// object array starting at index) -- definitions live elsewhere.
199 void follow_contents(oop obj);
200 void follow_contents(objArrayOop array, int index);
201
// NOTE(review): presumably updates interior pointers of obj after its
// referents have moved -- inferred from the name; confirm in the .cpp.
202 void update_contents(oop obj);
203 };
204
// Maps a worker-thread index to its ParCompactionManager. The bounds
// check permits index == ParallelGCThreads, so the array evidently
// holds one slot beyond the GC worker threads -- presumably for the
// VM thread; confirm at the allocation site.
205 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
206 assert(_manager_array != NULL, "access of NULL manager_array");
207 assert(index >= 0 && index <= (int)ParallelGCThreads,
208 "out of range manager_array access");
209 return _manager_array[index];
210 }
211
212 bool ParCompactionManager::marking_stacks_empty() const {
213 return _marking_stack.is_empty() && _objarray_stack.is_empty();
214 }
215
216 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
|
153 static void reset_recycled_stack_index() {
154 _recycled_bottom = _recycled_top = -1;
155 }
156
157 ParCompactionManager();
158 ~ParCompactionManager();
159
160 // Pushes region_index onto the region stack selected by stack_index.
161 // If that region stack is full,
162 // pushes onto the region overflow stack.
163 static void region_list_push(uint stack_index, size_t region_index);
// Asserts (in debug builds, presumably) that the region list at
// stack_index has been fully drained -- confirm in the .cpp.
164 static void verify_region_list_empty(uint stack_index);
// Accessor for the shared parallel marking bitmap.
165 ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
166
167 // void drain_stacks();
168
// NOTE(review): predicates presumably consulted to choose the per-object
// action during compaction -- confirm at the call sites.
169 bool should_update();
170 bool should_copy();
171
172 // Save for later processing. Must not fail.
173 inline void push(oop obj);
174 inline void push_objarray(oop objarray, size_t index);
175 inline void push_region(size_t index);
176
// NOTE(review): presumably marks the object referenced by *p and, if
// newly marked, pushes it for later tracing -- definition is in the
// .inline.hpp; confirm there.
177 template <typename T>
178 inline void mark_and_push(T* p);
179
// Follows the given Klass (defined out of line).
180 inline void follow_klass(Klass* klass);
181
182 // Access function for compaction managers
183 static ParCompactionManager* gc_thread_compaction_manager(int index);
184
// Work-stealing entry points. Each delegates to the corresponding
// task-queue set, attempting to steal one task on behalf of queue
// queue_num using the caller-supplied random seed; returns true and
// fills the out-parameter on success.
185 static bool steal(int queue_num, int* seed, oop& t) {
186 return stack_array()->steal(queue_num, seed, t);
187 }
188
// Steals a partially-scanned object-array chunk task.
189 static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
190 return _objarray_queues->steal(queue_num, seed, t);
191 }
192
// Steals a compaction-region index from the region task-queue set.
193 static bool steal(int queue_num, int* seed, size_t& region) {
194 return region_array()->steal(queue_num, seed, region);
195 }
196
197 // Process tasks remaining on any marking stack
198 void follow_marking_stacks();
// True when both marking stacks are drained; defined inline below.
199 inline bool marking_stacks_empty() const;
200
201 // Process tasks remaining on any stack
202 void drain_region_stacks();
203
// Trace through obj (or, for the second overload, the slice of an
// object array starting at index) -- definitions live elsewhere.
204 void follow_contents(oop obj);
205 void follow_contents(objArrayOop array, int index);
206
// NOTE(review): presumably updates interior pointers of obj after its
// referents have moved -- inferred from the name; confirm in the .cpp.
207 void update_contents(oop obj);
208
// Oop closure bound to a ParCompactionManager. NOTE(review): the name
// suggests it marks each referenced object and pushes it on the owning
// manager's marking stack -- the do_oop bodies live elsewhere; confirm.
209 class MarkAndPushClosure: public ExtendedOopClosure {
210 private:
211 ParCompactionManager* _compaction_manager;
212 public:
213 MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
214
// do_oop_nv is the non-virtual variant used by specialized oop
// iteration; the virtual do_oop overloads presumably forward to it --
// confirm in the .inline.hpp.
215 template <typename T> void do_oop_nv(T* p);
216 virtual void do_oop(oop* p);
217 virtual void do_oop(narrowOop* p);
218
219 // This closure provides its own oop verification code.
220 debug_only(virtual bool should_verify_oops() { return false; })
221 };
222
// VoidClosure bound to a ParCompactionManager. NOTE(review): do_void
// presumably drains the manager's marking stacks (cf.
// follow_marking_stacks) -- the body lives elsewhere; confirm.
223 class FollowStackClosure: public VoidClosure {
224 private:
225 ParCompactionManager* _compaction_manager;
226 public:
227 FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
228 virtual void do_void();
229 };
230
231 // The one and only place to start following the classes.
232 // Should only be applied to the ClassLoaderData klasses list.
233 class FollowKlassClosure : public KlassClosure {
234 private:
// Non-owning; the closure applied to each visited Klass's oops.
235 MarkAndPushClosure* _mark_and_push_closure;
236 public:
237 FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
238 _mark_and_push_closure(mark_and_push_closure) { }
239 void do_klass(Klass* klass);
240 };
// Closes the enclosing class (its header is above this chunk).
241 };
242
// Maps a worker-thread index to its ParCompactionManager. The bounds
// check permits index == ParallelGCThreads, so the array evidently
// holds one slot beyond the GC worker threads -- presumably for the
// VM thread; confirm at the allocation site.
243 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
244 assert(_manager_array != NULL, "access of NULL manager_array");
245 assert(index >= 0 && index <= (int)ParallelGCThreads,
246 "out of range manager_array access");
247 return _manager_array[index];
248 }
249
250 bool ParCompactionManager::marking_stacks_empty() const {
251 return _marking_stack.is_empty() && _objarray_stack.is_empty();
252 }
253
254 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
|