60 using PtrQueue::byte_width_of_index;
61
  // Byte offset of the _buf field within a G1DirtyCardQueue, expressed as
  // a ByteSize.  Wraps the base-class template for this derived type.
  // NOTE(review): consumer not visible in this chunk -- presumably used by
  // compiler/barrier code that addresses the queue fields directly; confirm.
  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
65 using PtrQueue::byte_width_of_buf;
66
67 };
68
69 class G1DirtyCardQueueSet: public PtrQueueSet {
70 // Head and tail of a list of BufferNodes, linked through their next()
71 // fields. Similar to G1BufferNodeList, but without the _entry_count.
  struct HeadTail {
    BufferNode* _head;          // First node of the list; NULL if empty.
    BufferNode* _tail;          // Last node of the list; NULL if empty.
    // Construct an empty list.
    HeadTail() : _head(NULL), _tail(NULL) {}
    // Construct a list running from head to tail, linked via next().
    HeadTail(BufferNode* head, BufferNode* tail) : _head(head), _tail(tail) {}
  };
78
79 // A lock-free FIFO of BufferNodes, linked through their next() fields.
80 // This class has a restriction that pop() cannot return the last buffer
81 // in the queue, or what was the last buffer for a concurrent push/append
82 // operation. It is expected that there will be a later push/append that
83 // will make that buffer available to a future pop(), or there will
84 // eventually be a complete transfer via take_all().
  class Queue {
    BufferNode* volatile _head;
    // Pad between _head and _tail so they land on different cache lines,
    // separating the pop side (_head) from the push/append side (_tail).
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
    BufferNode* volatile _tail;
    DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));

    NONCOPYABLE(Queue);

  public:
    Queue() : _head(NULL), _tail(NULL) {}
    // Destructor exists only in debug builds; its definition is not visible
    // here -- presumably verifies the queue is empty at destruction; confirm.
    DEBUG_ONLY(~Queue();)

    // Return the first buffer in the queue.
    // Thread-safe, but the result may change immediately.
    BufferNode* top() const;

    // Thread-safe add the buffer to the end of the queue.
    // Implemented as a degenerate one-element append.
    void push(BufferNode& node) { append(node, node); }

    // Thread-safe add the buffers from first to last to the end of the queue.
    void append(BufferNode& first, BufferNode& last);

    // Thread-safe attempt to remove and return the first buffer in the queue.
    // Returns NULL if the queue is empty, or if only one buffer is found.
    // Uses GlobalCounter critical sections to address the ABA problem; this
    // works with the buffer allocator's use of GlobalCounter synchronization.
    BufferNode* pop();

    // Take all the buffers from the queue, leaving the queue empty.
    // Not thread-safe.
    HeadTail take_all();
  };
117
118 // Concurrent refinement may stop processing in the middle of a buffer if
119 // there is a pending safepoint, to avoid long delays to safepoint. A
120 // partially processed buffer needs to be recorded for processing by the
121 // safepoint if it's a GC safepoint; otherwise it needs to be recorded for
122 // further concurrent refinement work after the safepoint. But if the
123 // buffer was obtained from the completed buffer queue then it can't simply
124 // be added back to the queue, as that would introduce a new source of ABA
125 // for the queue.
126 //
127 // The PausedBuffer object is used to record such buffers for the upcoming
128 // safepoint, and provides access to the buffers recorded for previous
129 // safepoints. Before obtaining a buffer from the completed buffers queue,
130 // we first transfer any buffers from previous safepoints to the queue.
|
60 using PtrQueue::byte_width_of_index;
61
  // Byte offset of the _buf field within a G1DirtyCardQueue, expressed as
  // a ByteSize.  Wraps the base-class template for this derived type.
  // NOTE(review): consumer not visible in this chunk -- presumably used by
  // compiler/barrier code that addresses the queue fields directly; confirm.
  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
65 using PtrQueue::byte_width_of_buf;
66
67 };
68
69 class G1DirtyCardQueueSet: public PtrQueueSet {
70 // Head and tail of a list of BufferNodes, linked through their next()
71 // fields. Similar to G1BufferNodeList, but without the _entry_count.
  struct HeadTail {
    BufferNode* _head;          // First node of the list; NULL if empty.
    BufferNode* _tail;          // Last node of the list; NULL if empty.
    // Construct an empty list.
    HeadTail() : _head(NULL), _tail(NULL) {}
    // Construct a list running from head to tail, linked via next().
    HeadTail(BufferNode* head, BufferNode* tail) : _head(head), _tail(tail) {}
  };
78
79 // A lock-free FIFO of BufferNodes, linked through their next() fields.
80 // This class has a restriction that pop() may return NULL when there are
81 // buffers in the queue if there is a concurrent push/append operation.
  class Queue {
    BufferNode* volatile _head;
    // Pad between _head and _tail so they land on different cache lines,
    // separating the pop side (_head) from the push/append side (_tail).
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
    BufferNode* volatile _tail;
    DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));

    NONCOPYABLE(Queue);

  public:
    Queue() : _head(NULL), _tail(NULL) {}
    // Destructor exists only in debug builds; its definition is not visible
    // here -- presumably verifies the queue is empty at destruction; confirm.
    DEBUG_ONLY(~Queue();)

    // Return the first buffer in the queue.
    // Thread-safe, but the result may change immediately.
    BufferNode* top() const;

    // Thread-safe add the buffer to the end of the queue.
    // Implemented as a degenerate one-element append.
    void push(BufferNode& node) { append(node, node); }

    // Thread-safe add the buffers from first to last to the end of the queue.
    void append(BufferNode& first, BufferNode& last);

    // Thread-safe attempt to remove and return the first buffer in the queue.
    // Returns NULL if the queue is empty, or if a concurrent push/append
    // interferes. Uses GlobalCounter critical sections to address the ABA
    // problem; this works with the buffer allocator's use of GlobalCounter
    // synchronization.
    BufferNode* pop();

    // Take all the buffers from the queue, leaving the queue empty.
    // Not thread-safe.
    HeadTail take_all();
  };
115
116 // Concurrent refinement may stop processing in the middle of a buffer if
117 // there is a pending safepoint, to avoid long delays to safepoint. A
118 // partially processed buffer needs to be recorded for processing by the
119 // safepoint if it's a GC safepoint; otherwise it needs to be recorded for
120 // further concurrent refinement work after the safepoint. But if the
121 // buffer was obtained from the completed buffer queue then it can't simply
122 // be added back to the queue, as that would introduce a new source of ABA
123 // for the queue.
124 //
125 // The PausedBuffer object is used to record such buffers for the upcoming
126 // safepoint, and provides access to the buffers recorded for previous
127 // safepoints. Before obtaining a buffer from the completed buffers queue,
128 // we first transfer any buffers from previous safepoints to the queue.
|