
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

rev 10742 : Make fields used in lock-free algorithms volatile
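
The patch makes the shared marking finger, _global_finger, volatile and threads the qualifier through every accessor and parameter that exposes its address, so reads of the finger on the lock-free paths always go back to memory instead of trusting a cached register copy; the unpatched hunks are listed first below and the patched hunks second. A minimal standalone sketch of the pattern being protected, not taken from the webrev: the SharedFinger class, claim() and chunk_bytes are made up for illustration, and the GCC/Clang __sync_val_compare_and_swap builtin stands in for HotSpot's Atomic:: wrappers.

    #include <cstddef>

    class SharedFinger {
      char* volatile _finger;   // shared scan position; no mutex guards it
     public:
      explicit SharedFinger(char* bottom) : _finger(bottom) {}

      // Claim [result, result + chunk_bytes) for the calling thread,
      // retrying if another worker advanced the finger first.
      char* claim(std::size_t chunk_bytes) {
        while (true) {
          char* old_finger = _finger;                  // volatile load
          char* new_finger = old_finger + chunk_bytes;
          if (__sync_val_compare_and_swap(&_finger, old_finger, new_finger)
              == old_finger) {
            return old_finger;                         // the chunk is ours
          }
          // Lost the race: reload the finger and try again.
        }
      }
    };

Without the volatile qualifier the compiler would be free to keep _finger cached across iterations of a polling loop; the compare-and-swap supplies the atomicity, the qualifier only guarantees the reload.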


2987     _task = task;
2988   }
2989 };
2990 
2991 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
2992   CMSConcMarkingTask* _task;
2993  public:
2994   bool should_exit_termination();
2995   void set_task(CMSConcMarkingTask* task) {
2996     _task = task;
2997   }
2998 };
2999 
3000 // MT Concurrent Marking Task
3001 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3002   CMSCollector* _collector;
3003   uint          _n_workers;       // requested/desired # workers
3004   bool          _result;
3005   CompactibleFreeListSpace*  _cms_space;
3006   char          _pad_front[64];   // padding to ...
3007   HeapWord*     _global_finger;   // ... avoid sharing cache line
3008   char          _pad_back[64];
3009   HeapWord*     _restart_addr;
3010 
3011   //  Exposed here for yielding support
3012   Mutex* const _bit_map_lock;
3013 
3014   // The per thread work queues, available here for stealing
3015   OopTaskQueueSet*  _task_queues;
3016 
3017   // Termination (and yielding) support
3018   CMSConcMarkingTerminator _term;
3019   CMSConcMarkingTerminatorTerminator _term_term;
3020 
3021  public:
3022   CMSConcMarkingTask(CMSCollector* collector,
3023                  CompactibleFreeListSpace* cms_space,
3024                  YieldingFlexibleWorkGang* workers,
3025                  OopTaskQueueSet* task_queues):
3026     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3027     _collector(collector),
3028     _cms_space(cms_space),
3029     _n_workers(0), _result(true),
3030     _task_queues(task_queues),
3031     _term(_n_workers, task_queues, _collector),
3032     _bit_map_lock(collector->bitMapLock())
3033   {
3034     _requested_size = _n_workers;
3035     _term.set_task(this);
3036     _term_term.set_task(this);
3037     _restart_addr = _global_finger = _cms_space->bottom();
3038   }
3039 
3040 
3041   OopTaskQueueSet* task_queues()  { return _task_queues; }
3042 
3043   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3044 
3045   HeapWord** global_finger_addr() { return &_global_finger; }
3046 
3047   CMSConcMarkingTerminator* terminator() { return &_term; }
3048 
3049   virtual void set_for_termination(uint active_workers) {
3050     terminator()->reset_for_reuse(active_workers);
3051   }
3052 
3053   void work(uint worker_id);
3054   bool should_yield() {
3055     return    ConcurrentMarkSweepThread::should_yield()
3056            && !_collector->foregroundGCIsActive();
3057   }
3058 
3059   virtual void coordinator_yield();  // stuff done by coordinator
3060   bool result() { return _result; }
3061 
3062   void reset(HeapWord* ra) {
3063     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3064     _restart_addr = _global_finger = ra;
3065     _term.reset_for_reuse();
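
The _pad_front[64] and _pad_back[64] arrays around _global_finger in the hunk above keep the constantly written finger on its own cache line, so stores to it do not invalidate the line holding the read-mostly fields next to it. A minimal sketch of the same false-sharing precaution, with made-up names and an assumed 64-byte cache line:

    // A hot, multiply-written field fenced off on its own cache line.
    struct PaddedFinger {
      char           _pad_front[64];  // manual padding, as in the task above
      char* volatile _finger;         // hot field, written by many workers
      char           _pad_back[64];
    };

    // C++11 can state the intent directly: alignas(64) rounds the object's
    // alignment (and hence its size in arrays) up to a full cache line.
    struct alignas(64) AlignedFinger {
      char* volatile _finger;
    };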


6508     // [_threshold, _finger) represents the interval
6509     // of cards to be cleared  in MUT (or precleaned in card table).
6510     // The set of cards to be cleared is all those that overlap
6511     // with the interval [_threshold, _finger); note that
6512     // _threshold is always kept card-aligned but _finger isn't
6513     // always card-aligned.
6514     HeapWord* old_threshold = _threshold;
6515     assert(old_threshold == (HeapWord*)round_to(
6516             (intptr_t)old_threshold, CardTableModRefBS::card_size),
6517            "_threshold should always be card-aligned");
6518     _threshold = (HeapWord*)round_to(
6519                    (intptr_t)_finger, CardTableModRefBS::card_size);
6520     MemRegion mr(old_threshold, _threshold);
6521     assert(!mr.is_empty(), "Control point invariant");
6522     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6523     _mut->clear_range(mr);
6524   }
6525 
6526   // Note: the local finger doesn't advance while we drain
6527   // the stack below, but the global finger sure can and will.
6528   HeapWord** gfa = _task->global_finger_addr();
6529   ParPushOrMarkClosure pushOrMarkClosure(_collector,
6530                                          _span, _bit_map,
6531                                          _work_queue,
6532                                          _overflow_stack,
6533                                          _finger,
6534                                          gfa, this);
6535   bool res = _work_queue->push(obj);   // overflow could occur here
6536   assert(res, "Will hold once we use workqueues");
6537   while (true) {
6538     oop new_oop;
6539     if (!_work_queue->pop_local(new_oop)) {
6540       // We emptied our work_queue; check if there's stuff that can
6541       // be gotten from the overflow stack.
6542       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6543             _overflow_stack, _work_queue)) {
6544         do_yield_check();
6545         continue;
6546       } else {  // done
6547         break;
6548       }
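
The hunk above clears the mod-union table for every card that overlaps [_threshold, _finger): _threshold is card-aligned by invariant, so the new threshold is _finger rounded up to the next card boundary and the cleared region becomes [old_threshold, new_threshold). A small worked example, assuming the usual 512-byte card size and a local round_up helper in place of HotSpot's round_to:

    #include <cassert>
    #include <cstdint>

    static inline intptr_t round_up(intptr_t x, intptr_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);  // power-of-two alignment
    }

    int main() {
      const intptr_t card_size     = 512;     // assumed card size in bytes
      intptr_t       old_threshold = 0x1000;  // card-aligned by the invariant
      intptr_t       finger        = 0x1234;  // need not be card-aligned
      intptr_t       new_threshold = round_up(finger, card_size);   // 0x1400
      // Cards cleared: [0x1000, 0x1400), i.e. every card overlapping
      // [old_threshold, finger).
      assert(old_threshold % card_size == 0);
      assert(new_threshold % card_size == 0 && new_threshold >= finger);
      return 0;
    }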


6675 
6676 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6677                      MemRegion span,
6678                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6679                      HeapWord* finger, MarkFromRootsClosure* parent) :
6680   MetadataAwareOopClosure(collector->ref_processor()),
6681   _collector(collector),
6682   _span(span),
6683   _bitMap(bitMap),
6684   _markStack(markStack),
6685   _finger(finger),
6686   _parent(parent)
6687 { }
6688 
6689 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6690                                            MemRegion span,
6691                                            CMSBitMap* bit_map,
6692                                            OopTaskQueue* work_queue,
6693                                            CMSMarkStack*  overflow_stack,
6694                                            HeapWord* finger,
6695                                            HeapWord** global_finger_addr,
6696                                            ParMarkFromRootsClosure* parent) :
6697   MetadataAwareOopClosure(collector->ref_processor()),
6698   _collector(collector),
6699   _whole_span(collector->_span),
6700   _span(span),
6701   _bit_map(bit_map),
6702   _work_queue(work_queue),
6703   _overflow_stack(overflow_stack),
6704   _finger(finger),
6705   _global_finger_addr(global_finger_addr),
6706   _parent(parent)
6707 { }
6708 
6709 // Assumes thread-safe access by callers, who are
6710 // responsible for mutual exclusion.
6711 void CMSCollector::lower_restart_addr(HeapWord* low) {
6712   assert(_span.contains(low), "Out of bounds addr");
6713   if (_restart_addr == NULL) {
6714     _restart_addr = low;
6715   } else {




2987     _task = task;
2988   }
2989 };
2990 
2991 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
2992   CMSConcMarkingTask* _task;
2993  public:
2994   bool should_exit_termination();
2995   void set_task(CMSConcMarkingTask* task) {
2996     _task = task;
2997   }
2998 };
2999 
3000 // MT Concurrent Marking Task
3001 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3002   CMSCollector*       _collector;
3003   uint                _n_workers;      // requested/desired # workers
3004   bool                _result;
3005   CompactibleFreeListSpace* _cms_space;
3006   char               _pad_front[64];   // padding to ...
3007   HeapWord* volatile _global_finger;   // ... avoid sharing cache line
3008   char               _pad_back[64];
3009   HeapWord*          _restart_addr;
3010 
3011   //  Exposed here for yielding support
3012   Mutex* const _bit_map_lock;
3013 
3014   // The per thread work queues, available here for stealing
3015   OopTaskQueueSet*  _task_queues;
3016 
3017   // Termination (and yielding) support
3018   CMSConcMarkingTerminator _term;
3019   CMSConcMarkingTerminatorTerminator _term_term;
3020 
3021  public:
3022   CMSConcMarkingTask(CMSCollector* collector,
3023                  CompactibleFreeListSpace* cms_space,
3024                  YieldingFlexibleWorkGang* workers,
3025                  OopTaskQueueSet* task_queues):
3026     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3027     _collector(collector),
3028     _cms_space(cms_space),
3029     _n_workers(0), _result(true),
3030     _task_queues(task_queues),
3031     _term(_n_workers, task_queues, _collector),
3032     _bit_map_lock(collector->bitMapLock())
3033   {
3034     _requested_size = _n_workers;
3035     _term.set_task(this);
3036     _term_term.set_task(this);
3037     _restart_addr = _global_finger = _cms_space->bottom();
3038   }
3039 
3040 
3041   OopTaskQueueSet* task_queues()  { return _task_queues; }
3042 
3043   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3044 
3045   HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3046 
3047   CMSConcMarkingTerminator* terminator() { return &_term; }
3048 
3049   virtual void set_for_termination(uint active_workers) {
3050     terminator()->reset_for_reuse(active_workers);
3051   }
3052 
3053   void work(uint worker_id);
3054   bool should_yield() {
3055     return    ConcurrentMarkSweepThread::should_yield()
3056            && !_collector->foregroundGCIsActive();
3057   }
3058 
3059   virtual void coordinator_yield();  // stuff done by coordinator
3060   bool result() { return _result; }
3061 
3062   void reset(HeapWord* ra) {
3063     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3064     _restart_addr = _global_finger = ra;
3065     _term.reset_for_reuse();
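
In the patched class above, global_finger_addr() returns HeapWord* volatile* instead of HeapWord**: the pointer it hands out points at a volatile pointer, so dereferencing it at the use site is still a volatile load, and the qualifier can no longer be dropped silently on the way to ParPushOrMarkClosure. A tiny illustration of how the declarator binds, with made-up names:

    char* volatile  g_finger;                  // the shared, volatile field
    char* volatile* finger_addr() { return &g_finger; }

    char* sample_reader() {
      char* volatile* p = finger_addr();       // pointer TO a volatile pointer
      return *p;                               // reloads g_finger from memory
    }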


6508     // [_threshold, _finger) represents the interval
6509     // of cards to be cleared  in MUT (or precleaned in card table).
6510     // The set of cards to be cleared is all those that overlap
6511     // with the interval [_threshold, _finger); note that
6512     // _threshold is always kept card-aligned but _finger isn't
6513     // always card-aligned.
6514     HeapWord* old_threshold = _threshold;
6515     assert(old_threshold == (HeapWord*)round_to(
6516             (intptr_t)old_threshold, CardTableModRefBS::card_size),
6517            "_threshold should always be card-aligned");
6518     _threshold = (HeapWord*)round_to(
6519                    (intptr_t)_finger, CardTableModRefBS::card_size);
6520     MemRegion mr(old_threshold, _threshold);
6521     assert(!mr.is_empty(), "Control point invariant");
6522     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6523     _mut->clear_range(mr);
6524   }
6525 
6526   // Note: the local finger doesn't advance while we drain
6527   // the stack below, but the global finger sure can and will.
6528   HeapWord* volatile* gfa = _task->global_finger_addr();
6529   ParPushOrMarkClosure pushOrMarkClosure(_collector,
6530                                          _span, _bit_map,
6531                                          _work_queue,
6532                                          _overflow_stack,
6533                                          _finger,
6534                                          gfa, this);
6535   bool res = _work_queue->push(obj);   // overflow could occur here
6536   assert(res, "Will hold once we use workqueues");
6537   while (true) {
6538     oop new_oop;
6539     if (!_work_queue->pop_local(new_oop)) {
6540       // We emptied our work_queue; check if there's stuff that can
6541       // be gotten from the overflow stack.
6542       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6543             _overflow_stack, _work_queue)) {
6544         do_yield_check();
6545         continue;
6546       } else {  // done
6547         break;
6548       }
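
The loop above drains the worker's local queue and, when pop_local() comes up empty, tries to refill it from the shared overflow stack (with a yield check) before concluding that the worker is done. A compact sketch of that drain-then-refill shape, using std:: containers and made-up names in place of OopTaskQueue and CMSMarkStack:

    #include <deque>
    #include <vector>

    typedef void* oop_t;   // stand-in for HotSpot's oop

    static void drain(std::deque<oop_t>& local_queue,
                      std::vector<oop_t>& overflow) {
      while (true) {
        if (local_queue.empty()) {
          if (!overflow.empty()) {
            local_queue.push_back(overflow.back());  // pull back overflowed work
            overflow.pop_back();
            // (the real loop performs a yield check before continuing)
            continue;
          }
          break;                                     // nothing left anywhere
        }
        oop_t obj = local_queue.front();
        local_queue.pop_front();
        (void)obj;   // ... scan obj and push newly discovered work ...
      }
    }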


6675 
6676 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6677                      MemRegion span,
6678                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6679                      HeapWord* finger, MarkFromRootsClosure* parent) :
6680   MetadataAwareOopClosure(collector->ref_processor()),
6681   _collector(collector),
6682   _span(span),
6683   _bitMap(bitMap),
6684   _markStack(markStack),
6685   _finger(finger),
6686   _parent(parent)
6687 { }
6688 
6689 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6690                                            MemRegion span,
6691                                            CMSBitMap* bit_map,
6692                                            OopTaskQueue* work_queue,
6693                                            CMSMarkStack*  overflow_stack,
6694                                            HeapWord* finger,
6695                                            HeapWord* volatile* global_finger_addr,
6696                                            ParMarkFromRootsClosure* parent) :
6697   MetadataAwareOopClosure(collector->ref_processor()),
6698   _collector(collector),
6699   _whole_span(collector->_span),
6700   _span(span),
6701   _bit_map(bit_map),
6702   _work_queue(work_queue),
6703   _overflow_stack(overflow_stack),
6704   _finger(finger),
6705   _global_finger_addr(global_finger_addr),
6706   _parent(parent)
6707 { }
6708 
6709 // Assumes thread-safe access by callers, who are
6710 // responsible for mutual exclusion.
6711 void CMSCollector::lower_restart_addr(HeapWord* low) {
6712   assert(_span.contains(low), "Out of bounds addr");
6713   if (_restart_addr == NULL) {
6714     _restart_addr = low;
6715   } else {

