
src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp

rev 56151 : imported patch take_buffers
rev 56154 : imported patch refine_buffer


  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1BufferNodeList.hpp"
  27 #include "gc/g1/g1CardTableEntryClosure.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1DirtyCardQueue.hpp"
  30 #include "gc/g1/g1FreeIdSet.hpp"
  31 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  32 #include "gc/g1/g1RemSet.hpp"
  33 #include "gc/g1/g1ThreadLocalData.hpp"
  34 #include "gc/g1/heapRegionRemSet.hpp"
  35 #include "gc/shared/suspendibleThreadSet.hpp"
  36 #include "gc/shared/workgroup.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/flags/flagSetting.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/safepoint.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "runtime/threadSMR.hpp"
  43 
  44 // Closure used for updating remembered sets and recording references that
  45 // point into the collection set while the mutator is running.
  46 // Assumed to be executed only concurrently with the mutator. Checks after
  47 // every card whether a SuspendibleThreadSet yield has been requested.
  48 class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
  49 public:
  50   bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
  51     G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);
  52 
  53     if (SuspendibleThreadSet::should_yield()) {
  54       // Caller will actually yield.
  55       return false;
  56     }
  57     // Otherwise, we finished successfully; return true.
  58     return true;
  59   }
  60 };
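
The return value of do_card_ptr is a yield request, not an error: returning false tells the caller to stop at the current card, record its position, and do the actual yielding. A standalone model of that contract (not HotSpot code; every name is an illustrative stand-in):

// Stands in for SuspendibleThreadSet::should_yield(); illustrative only.
static bool yield_requested = false;

struct ModelCardClosure {
  // Mirrors do_card_ptr: refine one card, then return false if the
  // caller should yield before the next card, true to continue.
  bool do_card(void* /*card_ptr*/) {
    // ... per-card refinement work would go here ...
    return !yield_requested;
  }
};

The loop consuming this return value is apply_closure_to_buffer below: it breaks out on false and stores the index of the first unprocessed card.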
  61 
  62 G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  63   // Dirty card queues are always active, so we create them with their
  64   // active field set to true.
  65   PtrQueue(qset, true /* active */)
  66 { }
  67 
  68 G1DirtyCardQueue::~G1DirtyCardQueue() {
  69   flush();
  70 }
  71 
  72 void G1DirtyCardQueue::handle_completed_buffer() {
  73   assert(_buf != NULL, "precondition");
  74   BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  75   G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  76   if (dcqs->process_or_enqueue_completed_buffer(node)) {
  77     reset();                    // Buffer fully processed, reset index.
  78   } else {
  79     allocate_buffer();          // Buffer enqueued, get a new one.
  80   }
  81 }


 211   if (from._head == NULL) return;
 212 
 213   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 214   if (_completed_buffers_tail == NULL) {
 215     assert(_completed_buffers_head == NULL, "Well-formedness");
 216     _completed_buffers_head = from._head;
 217     _completed_buffers_tail = from._tail;
 218   } else {
  219     assert(_completed_buffers_head != NULL, "Well-formedness");
 220     _completed_buffers_tail->set_next(from._head);
 221     _completed_buffers_tail = from._tail;
 222   }
 223   _num_cards += from._entry_count;
 224 
  225   assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
  226          (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
  227          "Sanity");
 228   verify_num_cards();
 229 }
 230 
 231 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
 232                                                   BufferNode* node,
 233                                                   uint worker_i) {
 234   if (cl == NULL) return true;
 235   bool result = true;
 236   void** buf = BufferNode::make_buffer_from_node(node);
 237   size_t i = node->index();
 238   size_t limit = buffer_size();
 239   for ( ; i < limit; ++i) {
 240     CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
 241     assert(card_ptr != NULL, "invariant");
 242     if (!cl->do_card_ptr(card_ptr, worker_i)) {
 243       result = false;           // Incomplete processing.
 244       break;
 245     }
 246   }
 247   assert(i <= buffer_size(), "invariant");
 248   node->set_index(i);
 249   return result;
 250 }
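
Partial consumption is recorded in the node itself: node->set_index(i) stores the first unprocessed slot, so a re-enqueued buffer later resumes exactly where it stopped, and a fully consumed buffer ends with index() == buffer_size(), which assert_fully_consumed (below) verifies. A minimal runnable model of this bookkeeping, assuming a plain vector in place of the BufferNode machinery:

#include <cassert>
#include <cstddef>
#include <vector>

// Model of a buffer with a resume index (stands in for BufferNode;
// not HotSpot code).
struct ModelBuffer {
  std::vector<int> cards;
  size_t index = 0;  // first unprocessed slot
};

// Handle at most stop_after cards in this call; returns true iff the
// buffer was fully consumed, mirroring apply_closure_to_buffer.
static bool process(ModelBuffer& b, size_t stop_after) {
  size_t i = b.index;
  size_t limit = b.cards.size();
  for (size_t done = 0; i < limit && done < stop_after; ++i, ++done) {
    // ... refine b.cards[i] here ...
  }
  b.index = i;  // record the resume point, like node->set_index(i)
  return i == limit;
}

int main() {
  ModelBuffer b;
  b.cards.resize(256);
  assert(!process(b, 100) && b.index == 100);  // partial: resume at 100
  assert(process(b, 200) && b.index == 256);   // second call finishes
  return 0;
}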
 251 
 252 #ifndef ASSERT
 253 #define assert_fully_consumed(node, buffer_size)
 254 #else
 255 #define assert_fully_consumed(node, buffer_size)                \
 256   do {                                                          \
 257     size_t _afc_index = (node)->index();                        \
 258     size_t _afc_size = (buffer_size);                           \
 259     assert(_afc_index == _afc_size,                             \
 260            "Buffer was not fully consumed as claimed: index: "  \
 261            SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
 262             _afc_index, _afc_size);                             \
 263   } while (0)
 264 #endif // ASSERT
 265 
 266 bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
 267   if (Thread::current()->is_Java_thread()) {
  268     // If the number of pending cards exceeds the limit, make this
  269     // Java thread do the processing itself.  We don't lock to access
  270     // the card count or padding; it is fine to be imprecise here.  The
  271     // addition of the padding could overflow, which is treated as unlimited.
 272     size_t limit = max_cards() + max_cards_padding();
 273     if ((num_cards() > limit) && (limit >= max_cards())) {
 274       if (mut_process_buffer(node)) {
 275         return true;
 276       }
 277     }
 278   }
 279   enqueue_completed_buffer(node);
 280   return false;
 281 }
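
The guard (limit >= max_cards()) is what turns an overflowed limit into "unlimited": if max_cards() + max_cards_padding() wraps around, the wrapped sum is necessarily smaller than max_cards(), so the conjunction fails and the mutator never self-processes. A small runnable model of just that arithmetic (not HotSpot code):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Model of the limit test in process_or_enqueue_completed_buffer.
static bool over_limit(size_t num_cards, size_t max_cards, size_t padding) {
  size_t limit = max_cards + padding;           // may wrap (unsigned)
  return (num_cards > limit) && (limit >= max_cards);
}

int main() {
  assert(over_limit(250, 100, 100));            // 250 > 200: over the limit
  assert(!over_limit(150, 100, 100));           // 150 <= 200: under the limit
  assert(!over_limit(SIZE_MAX, 100, SIZE_MAX)); // sum wraps: treated as unlimited
  return 0;
}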
 282 
 283 bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
 284   uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
 285   G1RefineCardConcurrentlyClosure cl;
 286   bool result = apply_closure_to_buffer(&cl, node, worker_id);
 287   _free_ids.release_par_id(worker_id); // release the id
 288 
 289   if (result) {
 290     assert_fully_consumed(node, buffer_size());
 291     Atomic::inc(&_processed_buffers_mut);
 292   }
 293   return result;
 294 }
 295 
 296 bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
 297   G1RefineCardConcurrentlyClosure cl;
 298   return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
 299 }
 300 
 301 bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
 302   assert_at_safepoint();
 303   return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
 304 }
 305 
 306 bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
 307                                                             uint worker_i,
 308                                                             size_t stop_at,
 309                                                             bool during_pause) {
 310   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
 311   BufferNode* nd = get_completed_buffer(stop_at);
 312   if (nd == NULL) {
 313     return false;
 314   } else {
 315     if (apply_closure_to_buffer(cl, nd, worker_i)) {
 316       assert_fully_consumed(nd, buffer_size());
 317       // Done with fully processed buffer.
 318       deallocate_buffer(nd);
 319       Atomic::inc(&_processed_buffers_rs_thread);
 320     } else {
 321       // Return partially processed buffer to the queue.
 322       guarantee(!during_pause, "Should never stop early");
 323       enqueue_completed_buffer(nd);
 324     }
 325     return true;
 326   }
 327 }
 328 
 329 void G1DirtyCardQueueSet::abandon_logs() {
 330   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 331   abandon_completed_buffers();
 332 
 333   // Since abandon is done only at safepoints, we can safely manipulate
 334   // these queues.
 335   struct AbandonThreadLogClosure : public ThreadClosure {
 336     virtual void do_thread(Thread* t) {
 337       G1ThreadLocalData::dirty_card_queue(t).reset();
 338     }
 339   } closure;
 340   Threads::threads_do(&closure);
 341 
 342   G1BarrierSet::shared_dirty_card_queue().reset();
 343 }
 344 

  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1BufferNodeList.hpp"
  27 #include "gc/g1/g1CardTableEntryClosure.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1DirtyCardQueue.hpp"
  30 #include "gc/g1/g1FreeIdSet.hpp"
  31 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  32 #include "gc/g1/g1RemSet.hpp"
  33 #include "gc/g1/g1ThreadLocalData.hpp"
  34 #include "gc/g1/heapRegionRemSet.hpp"
  35 #include "gc/shared/suspendibleThreadSet.hpp"
  36 #include "gc/shared/workgroup.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/flags/flagSetting.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/safepoint.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "runtime/threadSMR.hpp"
  43 
  44 G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  45   // Dirty card queues are always active, so we create them with their
  46   // active field set to true.
  47   PtrQueue(qset, true /* active */)
  48 { }
  49 
  50 G1DirtyCardQueue::~G1DirtyCardQueue() {
  51   flush();
  52 }
  53 
  54 void G1DirtyCardQueue::handle_completed_buffer() {
  55   assert(_buf != NULL, "precondition");
  56   BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  57   G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  58   if (dcqs->process_or_enqueue_completed_buffer(node)) {
  59     reset();                    // Buffer fully processed, reset index.
  60   } else {
  61     allocate_buffer();          // Buffer enqueued, get a new one.
  62   }
  63 }


 193   if (from._head == NULL) return;
 194 
 195   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 196   if (_completed_buffers_tail == NULL) {
 197     assert(_completed_buffers_head == NULL, "Well-formedness");
 198     _completed_buffers_head = from._head;
 199     _completed_buffers_tail = from._tail;
 200   } else {
  201     assert(_completed_buffers_head != NULL, "Well-formedness");
 202     _completed_buffers_tail->set_next(from._head);
 203     _completed_buffers_tail = from._tail;
 204   }
 205   _num_cards += from._entry_count;
 206 
  207   assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
  208          (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
  209          "Sanity");
 210   verify_num_cards();
 211 }
 212 
 213 G1BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
 214   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 215   G1BufferNodeList result(_completed_buffers_head, _completed_buffers_tail, _num_cards);
 216   _completed_buffers_head = NULL;
 217   _completed_buffers_tail = NULL;
 218   _num_cards = 0;
 219   return result;
 220 }
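
Both the list merge above and take_all_completed_buffers follow the same locking discipline: one _cbl_mon acquisition, head and tail kept null or non-null together, and the card count moved along with the nodes. A standalone model of the pair, with std::mutex standing in for _cbl_mon (not HotSpot code):

#include <cstddef>
#include <mutex>

struct Node { Node* next = nullptr; };

// Pre-linked list plus its entry count (models G1BufferNodeList).
struct NodeList {
  Node* head; Node* tail; size_t count;
};

struct ModelQueueSet {
  std::mutex mon;            // stands in for _cbl_mon
  Node* head = nullptr;
  Node* tail = nullptr;
  size_t num_cards = 0;

  // Append a whole pre-linked sublist under one lock acquisition,
  // preserving "head and tail both null or both non-null".
  void merge(const NodeList& from) {
    if (from.head == nullptr) return;
    std::lock_guard<std::mutex> x(mon);
    if (tail == nullptr) {
      head = from.head;
    } else {
      tail->next = from.head;
    }
    tail = from.tail;
    num_cards += from.count;
  }

  // Detach everything in one lock acquisition; the caller then owns
  // the returned list with no further synchronization needed.
  NodeList take_all() {
    std::lock_guard<std::mutex> x(mon);
    NodeList result = {head, tail, num_cards};
    head = nullptr;
    tail = nullptr;
    num_cards = 0;
    return result;
  }
};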
 221 
 222 bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node, uint worker_id) {
 223   G1RemSet* rem_set = G1CollectedHeap::heap()->rem_set();
 224   size_t size = buffer_size();
 225   void** buffer = BufferNode::make_buffer_from_node(node);
 226   size_t i = node->index();
 227   assert(i <= size, "invariant");
 228   for ( ; (i < size) && !SuspendibleThreadSet::should_yield(); ++i) {
 229     CardTable::CardValue* cp = static_cast<CardTable::CardValue*>(buffer[i]);
 230     rem_set->refine_card_concurrently(cp, worker_id);
 231   }
 232   node->set_index(i);
 233   return i == size;
 234 }
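
Relative to the closure-based loop in the old version, the yield check has moved into the loop condition: it is polled before each card rather than reported after one, so no card is refined once a yield has been requested, and the stored index is exactly the first unrefined slot. The shape, as a standalone sketch (should_yield and refine are hypothetical stand-ins for SuspendibleThreadSet::should_yield and refine_card_concurrently):

#include <cstddef>

static bool should_yield() { return false; }  // hypothetical stand-in
static void refine(void* /*card*/) { }        // hypothetical stand-in

static size_t refine_until_yield(void** buffer, size_t i, size_t size) {
  // Poll for a yield before each card: nothing is refined once a
  // yield has been requested, and i ends as the first unrefined slot.
  for ( ; (i < size) && !should_yield(); ++i) {
    refine(buffer[i]);
  }
  return i;  // the caller stores this back, cf. node->set_index(i)
}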
 235 
 236 #ifndef ASSERT
 237 #define assert_fully_consumed(node, buffer_size)
 238 #else
 239 #define assert_fully_consumed(node, buffer_size)                \
 240   do {                                                          \
 241     size_t _afc_index = (node)->index();                        \
 242     size_t _afc_size = (buffer_size);                           \
 243     assert(_afc_index == _afc_size,                             \
 244            "Buffer was not fully consumed as claimed: index: "  \
 245            SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
 246             _afc_index, _afc_size);                             \
 247   } while (0)
 248 #endif // ASSERT
 249 
 250 bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
 251   if (Thread::current()->is_Java_thread()) {
  252     // If the number of pending cards exceeds the limit, make this
  253     // Java thread do the processing itself.  We don't lock to access
  254     // the card count or padding; it is fine to be imprecise here.  The
  255     // addition of the padding could overflow, which is treated as unlimited.
 256     size_t limit = max_cards() + max_cards_padding();
 257     if ((num_cards() > limit) && (limit >= max_cards())) {
 258       if (mut_process_buffer(node)) {
 259         return true;
 260       }
 261     }
 262   }
 263   enqueue_completed_buffer(node);
 264   return false;
 265 }
 266 
 267 bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
 268   uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
 269   bool result = refine_buffer(node, worker_id);
 270   _free_ids.release_par_id(worker_id); // release the id
 271 
 272   if (result) {
 273     assert_fully_consumed(node, buffer_size());
 274     Atomic::inc(&_processed_buffers_mut);
 275   }
 276   return result;
 277 }
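
The claim/release bracketing around refine_buffer would also fit a scoped guard; a hypothetical sketch using the G1FreeIdSet calls shown above (ParIdGuard itself is illustrative, not a HotSpot type):

// Hypothetical RAII wrapper over the claim_par_id/release_par_id
// pair used above; illustrative only.
class ParIdGuard {
  G1FreeIdSet& _ids;
  uint _id;
public:
  explicit ParIdGuard(G1FreeIdSet& ids) : _ids(ids), _id(ids.claim_par_id()) {}
  ~ParIdGuard() { _ids.release_par_id(_id); }
  uint id() const { return _id; }
};

With such a guard, the body would reduce to constructing the guard and calling refine_buffer(node, guard.id()), releasing the id on every exit path.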
 278 
 279 bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, size_t stop_at) {
 280   BufferNode* node = get_completed_buffer(stop_at);
 281   if (node == NULL) {
 282     return false;
 283   } else if (refine_buffer(node, worker_id)) {
 284     assert_fully_consumed(node, buffer_size());
 285     // Done with fully processed buffer.
 286     deallocate_buffer(node);
 287     Atomic::inc(&_processed_buffers_rs_thread);
 288     return true;
 289   } else {
 290     // Return partially processed buffer to the queue.
 291     enqueue_completed_buffer(node);
 292     return true;
 293   }
 294 }
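
The return value distinguishes "no buffer available above stop_at" (false) from "did some refinement work" (true, whether or not the buffer was finished). A hedged sketch of a driver loop built on that contract (hypothetical; not the actual refinement-thread code):

// Hypothetical driver; dcqs, worker_id, and stop_at would come from
// the refinement control logic, which is not shown here.
void refine_loop(G1DirtyCardQueueSet* dcqs, uint worker_id, size_t stop_at) {
  while (dcqs->refine_completed_buffer_concurrently(worker_id, stop_at)) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();  // cooperate with safepoints
    }
  }
  // Loop exits once the queue has drained down to the stop_at threshold.
}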
 295 
 296 void G1DirtyCardQueueSet::abandon_logs() {
 297   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 298   abandon_completed_buffers();
 299 
 300   // Since abandon is done only at safepoints, we can safely manipulate
 301   // these queues.
 302   struct AbandonThreadLogClosure : public ThreadClosure {
 303     virtual void do_thread(Thread* t) {
 304       G1ThreadLocalData::dirty_card_queue(t).reset();
 305     }
 306   } closure;
 307   Threads::threads_do(&closure);
 308 
 309   G1BarrierSet::shared_dirty_card_queue().reset();
 310 }
 311 