241 bool should_yield,
242 bool concurrent_precleaning);
243 virtual void do_oop(oop* p);
244 virtual void do_oop(narrowOop* p);
245 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
246 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
247
248 Prefetch::style prefetch_style() {
249 return Prefetch::do_read;
250 }
251 void set_freelistLock(Mutex* m) {
252 _freelistLock = m;
253 }
254
255 private:
256 inline void do_yield_check();
257 void do_yield_work();
258 bool take_from_overflow_list();
259 };
260
261 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
262 // stack and the bitMap are shared, so access needs to be suitably
263 // synchronized. An OopTaskQueue structure, supporting efficient
264 // work stealing, replaces a CMSMarkStack for storing grey objects.
265 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
266 private:
267 MemRegion _span;
268 CMSBitMap* _bit_map;
269 OopTaskQueue* _work_queue;
270 const uint _low_water_mark;
271 Par_PushAndMarkClosure _par_pushAndMarkClosure;
272 protected:
273 DO_OOP_WORK_DEFN
274 public:
275 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
276 MemRegion span,
277 ReferenceProcessor* rp,
278 CMSBitMap* bit_map,
279 OopTaskQueue* work_queue);
280 virtual void do_oop(oop* p);
281 virtual void do_oop(narrowOop* p);
282 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
283 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
284
|
241 bool should_yield,
242 bool concurrent_precleaning);
243 virtual void do_oop(oop* p);
244 virtual void do_oop(narrowOop* p);
245 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
246 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
247
248 Prefetch::style prefetch_style() {
249 return Prefetch::do_read;
250 }
251 void set_freelistLock(Mutex* m) {
252 _freelistLock = m;
253 }
254
255 private:
256 inline void do_yield_check();
257 void do_yield_work();
258 bool take_from_overflow_list();
259 };
260
261 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
262 // stack and the bitMap are shared, so access needs to be suitably
263 // synchronized. An OopTaskQueue structure, supporting efficient
264 // work stealing, replaces a CMSMarkStack for storing grey objects.
265 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
266 private:
267 MemRegion _span;
268 CMSBitMap* _bit_map;
269 OopTaskQueue* _work_queue;
270 const uint _low_water_mark;
271 Par_PushAndMarkClosure _par_pushAndMarkClosure;
272 protected:
273 DO_OOP_WORK_DEFN
274 public:
275 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
276 MemRegion span,
277 ReferenceProcessor* rp,
278 CMSBitMap* bit_map,
279 OopTaskQueue* work_queue);
280 virtual void do_oop(oop* p);
281 virtual void do_oop(narrowOop* p);
282 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
283 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
284
|