35 // require these closure objects to be stack-allocated.
36 class CardTableEntryClosure: public CHeapObj<mtGC> {
37 public:
38 // Process the card whose card table entry is "card_ptr". If this returns
39 // "false", the iteration terminates early. "worker_i" is supplied by the iterating caller.
40 virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
41 };
42
43 // A ptrQueue whose elements are pointers to card table entries, not oops
43 // (see CardTableEntryClosure::do_card_ptr, which takes a jbyte* card_ptr).
44 class DirtyCardQueue: public PtrQueue {
45 public:
46 DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);
47
48 // Flush before destroying; queue may be used to capture pending work while
49 // doing something else, with auto-flush on completion.
50 ~DirtyCardQueue();
51
52 // Process queue entries and release resources.
53 void flush() { flush_impl(); }
54
55 // Apply the closure to all elements, and reset the index to make the
56 // buffer empty. If a closure application returns "false", return
57 // "false" immediately, halting the iteration. If "consume" is true,
58 // deletes processed entries from logs.
59 bool apply_closure(CardTableEntryClosure* cl,
60                      bool consume = true,
61                      uint worker_i = 0);
62
63 // Apply the closure to all elements of "buf", down to "index"
64 // (inclusive.) If this returns "false", then a closure application returned
65 // "false", and we return immediately. If "consume" is true, entries are
66 // set to NULL as they are processed, so they will not be processed again
67 // later.
68 static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
69                                       void** buf, size_t index, size_t sz,
70                                       bool consume = true,
71                                       uint worker_i = 0);
72 void **get_buf() { return _buf;}      // Current buffer; 0/NULL after reinitialize().
73 size_t get_index() { return _index;}  // Current index into the buffer.
74 void reinitialize() { _buf = 0; _sz = 0; _index = 0;}  // Forget (do not free) the current buffer.
75
76 // Compiler support.
77 static ByteSize byte_offset_of_index() {
78 return PtrQueue::byte_offset_of_index<DirtyCardQueue>();
79 }
80 using PtrQueue::byte_width_of_index;
81
82 static ByteSize byte_offset_of_buf() {
83 return PtrQueue::byte_offset_of_buf<DirtyCardQueue>();
84 }
85 using PtrQueue::byte_width_of_buf;
86
87 };
88
89
90
91 class DirtyCardQueueSet: public PtrQueueSet {
92 // The closure used in mut_process_buffer().
93 CardTableEntryClosure* _mut_process_closure;
94
95 DirtyCardQueue _shared_dirty_card_queue;
96
97 // Override of the PtrQueueSet hook; applies _mut_process_closure to "buf".
98 bool mut_process_buffer(void** buf);
99
100 // Protected by the _cbl_mon.
101 FreeIdSet* _free_ids;
102
103 // The number of completed buffers processed by mutator and rs thread,
104 // respectively.
105 jint _processed_buffers_mut;
106 jint _processed_buffers_rs_thread;
107
108 // Current buffer node used for parallel iteration.
109 BufferNode* volatile _cur_par_buffer_node;
110 public:
111 DirtyCardQueueSet(bool notify_when_complete = true);
112
113 void initialize(CardTableEntryClosure* cl,
114                   Monitor* cbl_mon,
115                   Mutex* fl_lock,
116                   int process_completed_threshold,
117                   int max_completed_queue,
118                   Mutex* lock,
119                   DirtyCardQueueSet* fl_owner,
120                   bool init_free_ids = false);
121
122 // The number of parallel ids that can be claimed to allow collector or
123 // mutator threads to do card-processing work.
124 static uint num_par_ids();
125
126 static void handle_zero_index_for_thread(JavaThread* t);
127
128 // If there exists some completed buffer, pop it, then apply the
129 // specified closure to all its elements, nulling out those elements
130 // processed. If all elements are processed, returns "true". If no
131 // completed buffers exist, returns false. If a completed buffer exists,
132 // but is only partially completed before a "yield" happens, the
133 // partially completed buffer (with its processed elements set to NULL)
134 // is returned to the completed buffer set, and this call returns false.
135 bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
136                                          uint worker_i,
137                                          size_t stop_at,
138                                          bool during_pause);
139
140 BufferNode* get_completed_buffer(size_t stop_at);
141
142 // Applies the given closure to all completed buffers,
143 // non-consumptively.
144 void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
145
146 void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
147 // Applies the given closure to all completed buffers, non-consumptively.
148 // Parallel version.
149 void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
150
151 DirtyCardQueue* shared_dirty_card_queue() {
152 return &_shared_dirty_card_queue;
153 }
154
155 // Deallocate any completed log buffers
156 void clear();
157
158 // If a full collection is happening, reset partial logs, and ignore
159 // completed ones: the full collection will make them all irrelevant.
160 void abandon_logs();
161
162 // If any threads have partial logs, add them to the global list of logs.
163 void concatenate_logs();
164 void clear_n_completed_buffers() { _n_completed_buffers = 0;}  // NOTE(review): caller likely must hold _cbl_mon -- confirm at call sites.
165
166 jint processed_buffers_mut() {
167 return _processed_buffers_mut;
168 }
|
35 // require these closure objects to be stack-allocated.
36 class CardTableEntryClosure: public CHeapObj<mtGC> {
37 public:
38 // Process the card whose card table entry is "card_ptr". If this returns
39 // "false", the iteration terminates early. "worker_i" is supplied by the iterating caller.
40 virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
41 };
42
43 // A ptrQueue whose elements are pointers to card table entries, not oops
43 // (see CardTableEntryClosure::do_card_ptr, which takes a jbyte* card_ptr).
44 class DirtyCardQueue: public PtrQueue {
45 public:
46 DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);
47
48 // Flush before destroying; queue may be used to capture pending work while
49 // doing something else, with auto-flush on completion.
50 ~DirtyCardQueue();
51
52 // Process queue entries and release resources.
53 void flush() { flush_impl(); }
54
55 // Apply the closure to all active elements, from index to size. If
56 // all closure applications return true, then returns true. Stops
57 // processing after the first false closure application and returns
58 // false. If "consume" is true, index is updated to follow the last
59 // processed element.
60 bool apply_closure(CardTableEntryClosure* cl,
61                      bool consume = true,
62                      uint worker_i = 0);
63
64 // Apply the closure to all active elements of "node", from its
65 // index to sz. If all closure applications return true, then
66 // returns true. Stops processing after the first false closure
67 // application and returns false. If "consume" is true, the node's
68 // index is updated to follow the last processed element.
69 static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
70                                       BufferNode* node, size_t sz,
71                                       bool consume = true,
72                                       uint worker_i = 0);
73 void **get_buf() { return _buf;}      // Current buffer; 0/NULL after reinitialize().
74 size_t get_index() { return _index;}  // Index of the first active element.
75 void reinitialize() { _buf = 0; _sz = 0; _index = 0;}  // Forget (do not free) the current buffer.
76
77 // Compiler support.
78 static ByteSize byte_offset_of_index() {
79 return PtrQueue::byte_offset_of_index<DirtyCardQueue>();
80 }
81 using PtrQueue::byte_width_of_index;
82
83 static ByteSize byte_offset_of_buf() {
84 return PtrQueue::byte_offset_of_buf<DirtyCardQueue>();
85 }
86 using PtrQueue::byte_width_of_buf;
87
88 };
89
90
91
92 class DirtyCardQueueSet: public PtrQueueSet {
93 // The closure used in mut_process_buffer().
94 CardTableEntryClosure* _mut_process_closure;
95
96 DirtyCardQueue _shared_dirty_card_queue;
97
98 // Override of the PtrQueueSet hook; applies _mut_process_closure to "node".
99 bool mut_process_buffer(BufferNode* node);
100
101 // Protected by the _cbl_mon.
102 FreeIdSet* _free_ids;
103
104 // The number of completed buffers processed by mutator and rs thread,
105 // respectively.
106 jint _processed_buffers_mut;
107 jint _processed_buffers_rs_thread;
108
109 // Current buffer node used for parallel iteration.
110 BufferNode* volatile _cur_par_buffer_node;
111
112 // Per-queue helper; presumably used by concatenate_logs() below -- confirm.
112 void concatenate_log(DirtyCardQueue& dcq);
113
114 public:
115 DirtyCardQueueSet(bool notify_when_complete = true);
116
117 void initialize(CardTableEntryClosure* cl,
118                   Monitor* cbl_mon,
119                   Mutex* fl_lock,
120                   int process_completed_threshold,
121                   int max_completed_queue,
122                   Mutex* lock,
123                   DirtyCardQueueSet* fl_owner,
124                   bool init_free_ids = false);
125
126 // The number of parallel ids that can be claimed to allow collector or
127 // mutator threads to do card-processing work.
128 static uint num_par_ids();
129
130 static void handle_zero_index_for_thread(JavaThread* t);
131
132 // If there exists some completed buffer, pop it, then apply the
133 // specified closure to its active elements. If all active elements
134 // are processed, returns "true". If no completed buffers exist,
135 // returns false. If a completed buffer exists, but is only
136 // partially completed before a "yield" happens, the partially
137 // completed buffer (with its index updated to exclude the processed
138 // elements) is returned to the completed buffer set, and this call
139 // returns false.
140 bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
141                                          uint worker_i,
142                                          size_t stop_at,
143                                          bool during_pause);
144
145 BufferNode* get_completed_buffer(size_t stop_at);
146
147 void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
148 // Applies the given closure to all completed buffers, non-consumptively.
149 // Can be used in parallel, all callers using the iteration state initialized
150 // by reset_for_par_iteration.
151 void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
152
153 DirtyCardQueue* shared_dirty_card_queue() {
154 return &_shared_dirty_card_queue;
155 }
156
157 // Deallocate any completed log buffers
158 void clear();
159
160 // If a full collection is happening, reset partial logs, and ignore
161 // completed ones: the full collection will make them all irrelevant.
162 void abandon_logs();
163
164 // If any threads have partial logs, add them to the global list of logs.
165 void concatenate_logs();
166 void clear_n_completed_buffers() { _n_completed_buffers = 0;}  // NOTE(review): caller likely must hold _cbl_mon -- confirm at call sites.
167
168 jint processed_buffers_mut() {
169 return _processed_buffers_mut;
170 }
|