
src/share/vm/gc/g1/dirtyCardQueue.cpp

rev 10597 : [mq]: yield_inc1

--- old/src/share/vm/gc/g1/dirtyCardQueue.cpp


 158   bool result = true;
 159   void** buf = BufferNode::make_buffer_from_node(node);
 160   size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
 161   size_t i = DirtyCardQueue::byte_index_to_index(node->index());
 162   for ( ; i < limit; ++i) {
 163     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
 164     assert(card_ptr != NULL, "invariant");
 165     if (!cl->do_card_ptr(card_ptr, worker_i)) {
 166       result = false;           // Incomplete processing.
 167       break;
 168     }
 169   }
 170   if (consume) {
 171     size_t new_index = DirtyCardQueue::index_to_byte_index(i);
 172     assert(new_index <= buffer_size(), "invariant");
 173     node->set_index(new_index);
 174   }
 175   return result;
 176 }
 177 
 178 bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
 179   guarantee(_free_ids != NULL, "must be");
 180 
 181   uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
 182   bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
 183   _free_ids->release_par_id(worker_i); // release the id
 184 
 185   if (result) {
 186     assert(node->index() == buffer_size(), "apply said fully consumed");
 187     Atomic::inc(&_processed_buffers_mut);
 188   }
 189   return result;
 190 }
 191 
 192 
 193 BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
 194   BufferNode* nd = NULL;
 195   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 196 
 197   if (_n_completed_buffers <= stop_at) {
 198     _process_completed = false;
 199     return NULL;
 200   }
 201 
 202   if (_completed_buffers_head != NULL) {
 203     nd = _completed_buffers_head;
 204     assert(_n_completed_buffers > 0, "Invariant");
 205     _completed_buffers_head = nd->next();
 206     _n_completed_buffers--;
 207     if (_completed_buffers_head == NULL) {
 208       assert(_n_completed_buffers == 0, "Invariant");
 209       _completed_buffers_tail = NULL;
 210     }
 211   }
 212   DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
 213   return nd;
 214 }
 215 
 216 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
 217                                                           uint worker_i,
 218                                                           size_t stop_at,
 219                                                           bool during_pause) {
 220   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
 221   BufferNode* nd = get_completed_buffer(stop_at);
 222   if (nd == NULL) {
 223     return false;
 224   } else {
 225     if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
 226       assert(nd->index() == buffer_size(), "apply said fully consumed");
 227       // Done with fully processed buffer.
 228       deallocate_buffer(nd);
 229       Atomic::inc(&_processed_buffers_rs_thread);
 230     } else {
 231       // Return partially processed buffer to the queue.
 232       guarantee(!during_pause, "Should never stop early");
 233       enqueue_complete_buffer(nd);
 234     }
 235     return true;
 236   }
 237 }
 238 
 239 void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
 240   BufferNode* nd = _cur_par_buffer_node;
 241   while (nd != NULL) {
 242     BufferNode* next = nd->next();
 243     void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
 244     if (actual == nd) {
 245       bool b = apply_closure_to_buffer(cl, nd, false);
 246       guarantee(b, "Should not stop early.");

+++ new/src/share/vm/gc/g1/dirtyCardQueue.cpp




 158   bool result = true;
 159   void** buf = BufferNode::make_buffer_from_node(node);
 160   size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
 161   size_t i = DirtyCardQueue::byte_index_to_index(node->index());
 162   for ( ; i < limit; ++i) {
 163     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
 164     assert(card_ptr != NULL, "invariant");
 165     if (!cl->do_card_ptr(card_ptr, worker_i)) {
 166       result = false;           // Incomplete processing.
 167       break;
 168     }
 169   }
 170   if (consume) {
 171     size_t new_index = DirtyCardQueue::index_to_byte_index(i);
 172     assert(new_index <= buffer_size(), "invariant");
 173     node->set_index(new_index);
 174   }
 175   return result;
 176 }
 177 
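This fragment walks the buffer from the node's current index up to the buffer end and, when consume is true, stores back how far it got, so a closure that stops early can resume later. The index stored on the node is a byte offset, while the loop uses element indexes; a small standalone sketch of the two conversions, assuming (as in HotSpot's pointer queues) that one element occupies sizeof(void*) bytes:

    #include <cstddef>

    // Buffers store void* entries, but queue indexes are kept in bytes; these
    // mirror DirtyCardQueue::byte_index_to_index and index_to_byte_index under
    // the assumption that one element is sizeof(void*) bytes wide.
    inline size_t byte_index_to_index(size_t byte_index) {
      return byte_index / sizeof(void*);
    }
    inline size_t index_to_byte_index(size_t elem_index) {
      return elem_index * sizeof(void*);
    }

    int main() {
      size_t buffer_size_bytes = 2048;  // hypothetical buffer size
      size_t limit = byte_index_to_index(buffer_size_bytes);  // 256 on 64-bit
      return index_to_byte_index(limit) == buffer_size_bytes ? 0 : 1;
    }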
 178 inline void assert_fully_consumed(BufferNode* node, size_t buffer_size) {
 179   assert(node->index() == buffer_size,
 180          "Buffer was not fully consumed as claimed: index: " SIZE_FORMAT
 181          ", size: " SIZE_FORMAT,
 182          node->index(), buffer_size);
 183 }
 184 
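assert_fully_consumed centralizes the fully-consumed check that the old version wrote out twice as a bare assert (old lines 186 and 226), and its message now reports both the observed index and the expected size. A minimal standalone sketch of the same pattern, with a stubbed BufferNode and fprintf/abort standing in for HotSpot's assert and SIZE_FORMAT; the stub is for illustration only:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Stub standing in for HotSpot's BufferNode; only index() is needed here.
    struct BufferNode {
      size_t _index;
      size_t index() const { return _index; }
    };

    // Same shape as the new helper: one centralized check whose message
    // carries both values, instead of two copies of a bare assert.
    inline void assert_fully_consumed(BufferNode* node, size_t buffer_size) {
      if (node->index() != buffer_size) {
        fprintf(stderr,
                "Buffer was not fully consumed as claimed: index: %zu, size: %zu\n",
                node->index(), buffer_size);
        abort();
      }
    }

    int main() {
      BufferNode node = { 256 };
      assert_fully_consumed(&node, 256);  // index == size: check passes
      return 0;
    }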
 185 bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
 186   guarantee(_free_ids != NULL, "must be");
 187 
 188   uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
 189   bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
 190   _free_ids->release_par_id(worker_i); // release the id
 191 
 192   if (result) {
 193     assert_fully_consumed(node, buffer_size());
 194     Atomic::inc(&_processed_buffers_mut);
 195   }
 196   return result;
 197 }
 198 
 199 
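mut_process_buffer runs on a mutator thread, so it temporarily claims a worker id from _free_ids, applies the closure under that id, and releases the id afterwards. A hypothetical sketch of that claim/release pattern with a std::mutex-guarded pool standing in for HotSpot's FreeIdSet (the real set can block a claimer until an id is free; this sketch simply assumes one is available):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    // Hypothetical stand-in for HotSpot's FreeIdSet: a small pool of worker
    // ids handed out and returned under a lock, mirroring claim_par_id()
    // and release_par_id().
    class FreeIdSet {
      std::mutex            _lock;
      std::vector<uint32_t> _free;
    public:
      explicit FreeIdSet(uint32_t n) {
        for (uint32_t i = 0; i < n; ++i) _free.push_back(i);
      }
      uint32_t claim_par_id() {
        std::lock_guard<std::mutex> g(_lock);
        uint32_t id = _free.back();  // sketch assumes an id is available
        _free.pop_back();
        return id;
      }
      void release_par_id(uint32_t id) {
        std::lock_guard<std::mutex> g(_lock);
        _free.push_back(id);
      }
    };

    int main() {
      FreeIdSet ids(4);
      uint32_t worker_i = ids.claim_par_id();  // temporarily claim an id
      // ... apply the closure under this worker id ...
      ids.release_par_id(worker_i);            // release the id
      return 0;
    }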
 200 BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
 201   BufferNode* nd = NULL;
 202   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 203 
 204   if (_n_completed_buffers <= stop_at) {
 205     _process_completed = false;
 206     return NULL;
 207   }
 208 
 209   if (_completed_buffers_head != NULL) {
 210     nd = _completed_buffers_head;
 211     assert(_n_completed_buffers > 0, "Invariant");
 212     _completed_buffers_head = nd->next();
 213     _n_completed_buffers--;
 214     if (_completed_buffers_head == NULL) {
 215       assert(_n_completed_buffers == 0, "Invariant");
 216       _completed_buffers_tail = NULL;
 217     }
 218   }
 219   DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
 220   return nd;
 221 }
 222 
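get_completed_buffer pops one node from the head of the completed-buffer list under _cbl_mon, but declines (and clears _process_completed) rather than drop below stop_at queued buffers. A standalone sketch of that pop-with-threshold shape, with std::mutex in place of MutexLockerEx and an illustrative node type rather than the real BufferNode list:

    #include <cstddef>
    #include <mutex>

    struct Node { Node* next; };

    // Illustrative completed-buffer list: pop the head under a lock, but only
    // while more than stop_at nodes remain, so background threads can leave a
    // residue of buffers for other threads to process.
    struct CompletedList {
      std::mutex lock;
      Node*      head  = nullptr;
      Node*      tail  = nullptr;
      size_t     count = 0;

      Node* get(size_t stop_at) {
        std::lock_guard<std::mutex> g(lock);
        if (count <= stop_at) {
          return nullptr;          // leave stop_at buffers queued
        }
        Node* nd = head;           // count > stop_at >= 0, so head != nullptr
        head = nd->next;
        if (--count == 0) {
          tail = nullptr;          // list is now empty
        }
        return nd;
      }
    };

    int main() {
      Node b = { nullptr }, a = { &b };
      CompletedList list;
      list.head = &a; list.tail = &b; list.count = 2;
      Node* nd = list.get(1);  // pops a; a second call would decline
      return nd == &a ? 0 : 1;
    }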
 223 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
 224                                                           uint worker_i,
 225                                                           size_t stop_at,
 226                                                           bool during_pause) {
 227   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
 228   BufferNode* nd = get_completed_buffer(stop_at);
 229   if (nd == NULL) {
 230     return false;
 231   } else {
 232     if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
 233       assert_fully_consumed(nd, buffer_size());
 234       // Done with fully processed buffer.
 235       deallocate_buffer(nd);
 236       Atomic::inc(&_processed_buffers_rs_thread);
 237     } else {
 238       // Return partially processed buffer to the queue.
 239       guarantee(!during_pause, "Should never stop early");
 240       enqueue_complete_buffer(nd);
 241     }
 242     return true;
 243   }
 244 }
 245 
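The contract here: a fully consumed buffer is deallocated and counted, a partially processed one is re-enqueued (which during_pause forbids), and the return value only says whether a buffer was claimed at all. Callers can therefore drain the queue by looping until it returns false; a hypothetical caller-side sketch against the functions in this file (drain_completed_buffers itself is not part of this class):

    // Hypothetical drain loop; stop_at > 0 leaves that many buffers queued
    // for other refinement threads, and during_pause == false permits a
    // closure to stop early and have its buffer re-enqueued.
    void drain_completed_buffers(DirtyCardQueueSet* dcqs,
                                 CardTableEntryClosure* cl,
                                 uint worker_i,
                                 size_t stop_at) {
      while (dcqs->apply_closure_to_completed_buffer(cl, worker_i, stop_at,
                                                     false /* during_pause */)) {
        // Each iteration consumed or re-enqueued exactly one buffer.
      }
    }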
 246 void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
 247   BufferNode* nd = _cur_par_buffer_node;
 248   while (nd != NULL) {
 249     BufferNode* next = nd->next();
 250     void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
 251     if (actual == nd) {
 252       bool b = apply_closure_to_buffer(cl, nd, false);
 253       guarantee(b, "Should not stop early.");
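The loop above (cut off at the end of this excerpt) claims buffers without a lock: each worker reads the shared cursor _cur_par_buffer_node, computes the successor, and tries to swing the cursor forward with cmpxchg; only the thread that sees its own node come back from the CAS owns that node. A standalone sketch of the same claiming idiom using std::atomic in place of Atomic::cmpxchg_ptr:

    #include <atomic>
    #include <cstdio>

    struct Node {
      Node* next;
      int   payload;
    };

    // Shared cursor over a linked list of nodes; workers race to advance it.
    std::atomic<Node*> cursor;

    void worker(void (*process)(Node*)) {
      Node* nd = cursor.load();
      while (nd != nullptr) {
        Node* next = nd->next;
        if (cursor.compare_exchange_strong(nd, next)) {
          process(nd);  // CAS succeeded: this thread owns nd exclusively
          nd = next;
        }
        // On failure, compare_exchange_strong reloaded nd with the current
        // cursor value, so the loop simply retries on the new node.
      }
    }

    int main() {
      Node c = { nullptr, 3 }, b = { &c, 2 }, a = { &b, 1 };
      cursor.store(&a);
      worker([](Node* n) { printf("%d\n", n->payload); });  // prints 1 2 3
      return 0;
    }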

