13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
26 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
27
28 #include "gc/shared/ptrQueue.hpp"
29 #include "memory/allocation.hpp"
30
31 class G1DirtyCardQueueSet;
32 class G1FreeIdSet;
33 class JavaThread;
34 class Monitor;
35
36 // A closure class for processing card table entries. Note that we don't
37 // require these closure objects to be stack-allocated.
38 class G1CardTableEntryClosure: public CHeapObj<mtGC> {
39 public:
40 // Process the card whose card table entry is "card_ptr". If returns
41 // "false", terminate the iteration early.
42 virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
43 };
44
// A PtrQueue whose elements are pointers to card table entries (dirty
// cards), not oops.
46 class G1DirtyCardQueue: public PtrQueue {
47 public:
48 G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);
49
50 // Flush before destroying; queue may be used to capture pending work while
51 // doing something else, with auto-flush on completion.
52 ~G1DirtyCardQueue();
53
111 jint _processed_buffers_rs_thread;
112
113 // Current buffer node used for parallel iteration.
114 BufferNode* volatile _cur_par_buffer_node;
115
116 void concatenate_log(G1DirtyCardQueue& dcq);
117
118 public:
119 G1DirtyCardQueueSet(bool notify_when_complete = true);
120 ~G1DirtyCardQueueSet();
121
122 void initialize(Monitor* cbl_mon,
123 BufferNode::Allocator* allocator,
124 Mutex* lock,
125 bool init_free_ids = false);
126
127 // The number of parallel ids that can be claimed to allow collector or
128 // mutator threads to do card-processing work.
129 static uint num_par_ids();
130
131 static void handle_zero_index_for_thread(JavaThread* t);
132
133 // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
134 // completed buffers remaining.
135 bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
136
137 // Apply the given closure to all completed buffers. The given closure's do_card_ptr
138 // must never return false. Must only be called during GC.
139 bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
140
141 void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
142 // Applies the current closure to all completed buffers, non-consumptively.
143 // Can be used in parallel, all callers using the iteration state initialized
144 // by reset_for_par_iteration.
145 void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
146
147 G1DirtyCardQueue* shared_dirty_card_queue() {
148 return &_shared_dirty_card_queue;
149 }
150
151 // If a full collection is happening, reset partial logs, and ignore
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
26 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
27
28 #include "gc/shared/ptrQueue.hpp"
29 #include "memory/allocation.hpp"
30
31 class G1DirtyCardQueueSet;
32 class G1FreeIdSet;
33 class Thread;
34 class Monitor;
35
36 // A closure class for processing card table entries. Note that we don't
37 // require these closure objects to be stack-allocated.
38 class G1CardTableEntryClosure: public CHeapObj<mtGC> {
39 public:
40 // Process the card whose card table entry is "card_ptr". If returns
41 // "false", terminate the iteration early.
42 virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
43 };
44
// A PtrQueue whose elements are pointers to card table entries (dirty
// cards), not oops.
46 class G1DirtyCardQueue: public PtrQueue {
47 public:
48 G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);
49
50 // Flush before destroying; queue may be used to capture pending work while
51 // doing something else, with auto-flush on completion.
52 ~G1DirtyCardQueue();
53
111 jint _processed_buffers_rs_thread;
112
113 // Current buffer node used for parallel iteration.
114 BufferNode* volatile _cur_par_buffer_node;
115
116 void concatenate_log(G1DirtyCardQueue& dcq);
117
118 public:
119 G1DirtyCardQueueSet(bool notify_when_complete = true);
120 ~G1DirtyCardQueueSet();
121
122 void initialize(Monitor* cbl_mon,
123 BufferNode::Allocator* allocator,
124 Mutex* lock,
125 bool init_free_ids = false);
126
127 // The number of parallel ids that can be claimed to allow collector or
128 // mutator threads to do card-processing work.
129 static uint num_par_ids();
130
131 static void handle_zero_index_for_thread(Thread* t);
132
133 // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
134 // completed buffers remaining.
135 bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
136
137 // Apply the given closure to all completed buffers. The given closure's do_card_ptr
138 // must never return false. Must only be called during GC.
139 bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
140
141 void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
142 // Applies the current closure to all completed buffers, non-consumptively.
143 // Can be used in parallel, all callers using the iteration state initialized
144 // by reset_for_par_iteration.
145 void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
146
147 G1DirtyCardQueue* shared_dirty_card_queue() {
148 return &_shared_dirty_card_queue;
149 }
150
151 // If a full collection is happening, reset partial logs, and ignore
|