28 #include "gc/shared/ptrQueue.hpp"
29 #include "memory/allocation.hpp"
30
31 class G1DirtyCardQueueSet;
32 class G1FreeIdSet;
33 class Thread;
34 class Monitor;
35
// A closure class for processing card table entries.  Note that we don't
// require these closure objects to be stack-allocated.
class G1CardTableEntryClosure: public CHeapObj<mtGC> {
public:
  // Process the card whose card table entry is "card_ptr".  "worker_i" is
  // the id of the worker performing the processing.  If this returns
  // "false", terminate the iteration early.
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
};
44
// A ptrQueue whose elements are pointers to dirty card table entries
// (jbyte* card addresses -- see G1CardTableEntryClosure::do_card_ptr),
// not oops.
class G1DirtyCardQueue: public PtrQueue {
public:
  // NOTE(review): "permanent" presumably marks a queue that lives for the
  // lifetime of the VM (e.g. a shared queue) -- confirm against PtrQueue.
  G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);

  // Flush before destroying; queue may be used to capture pending work while
  // doing something else, with auto-flush on completion.
  ~G1DirtyCardQueue();

  // Process queue entries and release resources.
  void flush() { flush_impl(); }

  // Compiler support: byte offsets/widths of the index and buffer fields,
  // for code that accesses the queue without going through this API.
  static ByteSize byte_offset_of_index() {
    return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_index;

  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_buf;

};
69
70
71
// A set of dirty card queues, with a shared list of completed buffers that
// can be processed by mutator, refinement, or GC worker threads.
class G1DirtyCardQueueSet: public PtrQueueSet {
  // Queue for dirty cards from threads without their own queue; exposed
  // via shared_dirty_card_queue().
  G1DirtyCardQueue _shared_dirty_card_queue;

  // Apply the closure to the elements of "node" from its index to
  // buffer_size. If all closure applications return true, then
  // returns true. Stops processing after the first closure
  // application that returns false, and returns false from this
  // function. If "consume" is true, the node's index is updated to
  // exclude the processed elements, e.g. up to the element for which
  // the closure returned false.
  bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                               BufferNode* node,
                               bool consume,
                               uint worker_i = 0);

  // If there are more than stop_at completed buffers, pop one, apply
  // the specified closure to its active elements, and return true.
  // Otherwise return false.
  //
  // A completely processed buffer is freed. However, if a closure
  // invocation returns false, processing is stopped and the partially
  // processed buffer (with its index updated to exclude the processed
  // elements, e.g. up to the element for which the closure returned
  // false) is returned to the completed buffer set.
  //
  // If during_pause is true, stop_at must be zero, and the closure
  // must never return false.
  bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                         uint worker_i,
                                         size_t stop_at,
                                         bool during_pause);

  // Process "node" on behalf of a mutator thread -- NOTE(review): inferred
  // from the name and _processed_buffers_mut; confirm against the .cpp.
  bool mut_process_buffer(BufferNode* node);

  // Pool of worker ids claimable for parallel card processing; created by
  // initialize() when init_free_ids is true (see num_par_ids()).
  G1FreeIdSet* _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

  // Current buffer node used for parallel iteration.
  BufferNode* volatile _cur_par_buffer_node;

  // Helper for concatenate_logs(): handles a single queue "dcq".
  void concatenate_log(G1DirtyCardQueue& dcq);

public:
  G1DirtyCardQueueSet(bool notify_when_complete = true);
  ~G1DirtyCardQueueSet();

  // Set up the queue set: "cbl_mon" is the completed-buffer-list monitor,
  // "allocator" provides buffer nodes, "lock" protects the shared queue
  // (presumably -- confirm against callers), and _free_ids is created
  // when "init_free_ids" is true.
  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  Mutex* lock,
                  bool init_free_ids = false);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static uint num_par_ids();

  static void handle_zero_index_for_thread(Thread* t);

  // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
  // completed buffers remaining.
  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);

  // Apply the given closure to all completed buffers. The given closure's do_card_ptr
  // must never return false. Must only be called during GC.
  bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);

  // Start a new parallel iteration over the completed buffers.
  void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
  // Applies the current closure to all completed buffers, non-consumptively.
  // Can be used in parallel, all callers using the iteration state initialized
  // by reset_for_par_iteration.
  void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);

  // Accessor for the queue used by threads without their own queue.
  G1DirtyCardQueue* shared_dirty_card_queue() {
    return &_shared_dirty_card_queue;
  }

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();

  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};
166
167 #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
|
28 #include "gc/shared/ptrQueue.hpp"
29 #include "memory/allocation.hpp"
30
31 class G1DirtyCardQueueSet;
32 class G1FreeIdSet;
33 class Thread;
34 class Monitor;
35
// A closure class for processing card table entries.  Note that we don't
// require these closure objects to be stack-allocated.
class G1CardTableEntryClosure: public CHeapObj<mtGC> {
public:
  // Process the card whose card table entry is "card_ptr".  "worker_i" is
  // the id of the worker performing the processing.  If this returns
  // "false", terminate the iteration early.
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
};
44
// A ptrQueue whose elements are pointers to dirty card table entries
// (jbyte* card addresses -- see G1CardTableEntryClosure::do_card_ptr),
// not oops.
class G1DirtyCardQueue: public PtrQueue {
public:
  G1DirtyCardQueue(G1DirtyCardQueueSet* qset);

  // Flush before destroying; queue may be used to capture pending work while
  // doing something else, with auto-flush on completion.
  ~G1DirtyCardQueue();

  // Process queue entries and release resources.
  void flush() { flush_impl(); }

  // Compiler support: byte offsets/widths of the index and buffer fields,
  // for code that accesses the queue without going through this API.
  static ByteSize byte_offset_of_index() {
    return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_index;

  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_buf;

};
69
70
71
// A set of dirty card queues, with a shared list of completed buffers that
// can be processed by mutator, refinement, or GC worker threads.
class G1DirtyCardQueueSet: public PtrQueueSet {
  // Apply the closure to the elements of "node" from its index to
  // buffer_size. If all closure applications return true, then
  // returns true. Stops processing after the first closure
  // application that returns false, and returns false from this
  // function. If "consume" is true, the node's index is updated to
  // exclude the processed elements, e.g. up to the element for which
  // the closure returned false.
  bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                               BufferNode* node,
                               bool consume,
                               uint worker_i = 0);

  // If there are more than stop_at completed buffers, pop one, apply
  // the specified closure to its active elements, and return true.
  // Otherwise return false.
  //
  // A completely processed buffer is freed. However, if a closure
  // invocation returns false, processing is stopped and the partially
  // processed buffer (with its index updated to exclude the processed
  // elements, e.g. up to the element for which the closure returned
  // false) is returned to the completed buffer set.
  //
  // If during_pause is true, stop_at must be zero, and the closure
  // must never return false.
  bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                         uint worker_i,
                                         size_t stop_at,
                                         bool during_pause);

  // Process "node" on behalf of a mutator thread -- NOTE(review): inferred
  // from the name and _processed_buffers_mut; confirm against the .cpp.
  bool mut_process_buffer(BufferNode* node);

  // Pool of worker ids claimable for parallel card processing; created by
  // initialize() when init_free_ids is true (see num_par_ids()).
  G1FreeIdSet* _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

  // Current buffer node used for parallel iteration.
  BufferNode* volatile _cur_par_buffer_node;

public:
  G1DirtyCardQueueSet(bool notify_when_complete = true);
  ~G1DirtyCardQueueSet();

  // Set up the queue set: "cbl_mon" is the completed-buffer-list monitor,
  // "allocator" provides buffer nodes, and _free_ids is created when
  // "init_free_ids" is true.
  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  bool init_free_ids = false);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static uint num_par_ids();

  static void handle_zero_index_for_thread(Thread* t);

  // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
  // completed buffers remaining.
  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);

  // Apply the given closure to all completed buffers. The given closure's do_card_ptr
  // must never return false. Must only be called during GC.
  bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);

  // Start a new parallel iteration over the completed buffers.
  void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
  // Applies the current closure to all completed buffers, non-consumptively.
  // Can be used in parallel, all callers using the iteration state initialized
  // by reset_for_par_iteration.
  void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();

  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};
157
158 #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
|