
src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp

rev 53864 : imported patch queue_access
rev 53865 : imported patch njt_iterate


  82 G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  83   delete _free_ids;
  84 }
  85 
  86 // Determines how many mutator threads can process the buffers in parallel.
  87 uint G1DirtyCardQueueSet::num_par_ids() {
  88   return (uint)os::initial_active_processor_count();
  89 }
  90 
  91 void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
  92                                      BufferNode::Allocator* allocator,
  93                                      Mutex* lock,
  94                                      bool init_free_ids) {
  95   PtrQueueSet::initialize(cbl_mon, allocator);
  96   _shared_dirty_card_queue.set_lock(lock);
  97   if (init_free_ids) {
  98     _free_ids = new G1FreeIdSet(0, num_par_ids());
  99   }
 100 }
 101 
 102 void G1DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
 103   G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
 104 }
 105 
 106 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
 107                                                   BufferNode* node,
 108                                                   bool consume,
 109                                                   uint worker_i) {
 110   if (cl == NULL) return true;
 111   bool result = true;
 112   void** buf = BufferNode::make_buffer_from_node(node);
 113   size_t i = node->index();
 114   size_t limit = buffer_size();
 115   for ( ; i < limit; ++i) {
 116     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
 117     assert(card_ptr != NULL, "invariant");
 118     if (!cl->do_card_ptr(card_ptr, worker_i)) {
 119       result = false;           // Incomplete processing.
 120       break;
 121     }
 122   }


 190 }
 191 
 192 void G1DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl) {
 193   BufferNode* nd = _cur_par_buffer_node;
 194   while (nd != NULL) {
 195     BufferNode* next = nd->next();
 196     BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
 197     if (actual == nd) {
 198       bool b = apply_closure_to_buffer(cl, nd, false);
 199       guarantee(b, "Should not stop early.");
 200       nd = next;
 201     } else {
 202       nd = actual;
 203     }
 204   }
 205 }
 206 
 207 void G1DirtyCardQueueSet::abandon_logs() {
 208   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 209   abandon_completed_buffers();
 210   // Since abandon is done only at safepoints, we can safely manipulate
 211   // these queues.
 212   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
 213     G1ThreadLocalData::dirty_card_queue(t).reset();
 214   }
 215   shared_dirty_card_queue()->reset();
 216 }
 217 
 218 void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
 219   if (!dcq.is_empty()) {
 220     dcq.flush();
 221   }
 222 }
 223 
 224 void G1DirtyCardQueueSet::concatenate_logs() {
 225   // Iterate over all the threads; if we find a partial log, add it to
 226   // the global list of logs.  Temporarily turn off the limit on the number
 227   // of outstanding buffers.
 228   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 229   size_t old_limit = max_completed_buffers();
 230   set_max_completed_buffers(MaxCompletedBuffersUnlimited);
 231   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
 232     concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
 233   }
 234   concatenate_log(_shared_dirty_card_queue);
 235   set_max_completed_buffers(old_limit);
 236 }
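
A note on the parallel draining loop above (lines 192-205, unchanged between the two revisions): multiple workers share one list of completed buffers, and each worker claims a node by trying to advance _cur_par_buffer_node from that node to its successor with Atomic::cmpxchg. The worker whose compare-and-swap succeeds owns the node and applies the closure to it; a loser's cmpxchg returns the value some other worker installed, and it retries from there (the nd = actual branch). Below is a minimal standalone sketch of the same claiming idiom, using std::atomic and illustrative names (Node, cursor, claim_and_process) rather than the HotSpot API:

  #include <atomic>
  #include <cstdio>

  struct Node {
    int payload;
    Node* next;
  };

  std::atomic<Node*> cursor;  // stands in for _cur_par_buffer_node

  // Run by each worker; every node is claimed by exactly one of them.
  void claim_and_process() {
    Node* nd = cursor.load();
    while (nd != nullptr) {
      Node* next = nd->next;
      // Try to claim nd by swinging the cursor to its successor.  On failure,
      // compare_exchange_strong reloads nd with the value another worker
      // installed, mirroring the "nd = actual" retry above.
      if (cursor.compare_exchange_strong(nd, next)) {
        std::printf("processed node %d\n", nd->payload);  // we own nd here
        nd = next;
      }
    }
  }

  int main() {
    Node c = {3, nullptr}, b = {2, &c}, a = {1, &b};
    cursor.store(&a);
    claim_and_process();  // single-threaded demo; real use runs several workers
    return 0;
  }

Since the cursor only ever moves forward along the list, a successful CAS hands each node to exactly one worker, and no locking is needed.
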


  82 G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  83   delete _free_ids;
  84 }
  85 
  86 // Determines how many mutator threads can process the buffers in parallel.
  87 uint G1DirtyCardQueueSet::num_par_ids() {
  88   return (uint)os::initial_active_processor_count();
  89 }
  90 
  91 void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
  92                                      BufferNode::Allocator* allocator,
  93                                      Mutex* lock,
  94                                      bool init_free_ids) {
  95   PtrQueueSet::initialize(cbl_mon, allocator);
  96   _shared_dirty_card_queue.set_lock(lock);
  97   if (init_free_ids) {
  98     _free_ids = new G1FreeIdSet(0, num_par_ids());
  99   }
 100 }
 101 
 102 void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
 103   G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
 104 }
 105 
 106 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
 107                                                   BufferNode* node,
 108                                                   bool consume,
 109                                                   uint worker_i) {
 110   if (cl == NULL) return true;
 111   bool result = true;
 112   void** buf = BufferNode::make_buffer_from_node(node);
 113   size_t i = node->index();
 114   size_t limit = buffer_size();
 115   for ( ; i < limit; ++i) {
 116     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
 117     assert(card_ptr != NULL, "invariant");
 118     if (!cl->do_card_ptr(card_ptr, worker_i)) {
 119       result = false;           // Incomplete processing.
 120       break;
 121     }
 122   }


 190 }
 191 
 192 void G1DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl) {
 193   BufferNode* nd = _cur_par_buffer_node;
 194   while (nd != NULL) {
 195     BufferNode* next = nd->next();
 196     BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
 197     if (actual == nd) {
 198       bool b = apply_closure_to_buffer(cl, nd, false);
 199       guarantee(b, "Should not stop early.");
 200       nd = next;
 201     } else {
 202       nd = actual;
 203     }
 204   }
 205 }
 206 
 207 void G1DirtyCardQueueSet::abandon_logs() {
 208   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 209   abandon_completed_buffers();
 210 
 211   // Since abandon is done only at safepoints, we can safely manipulate
 212   // these queues.
 213   struct AbandonThreadLogClosure : public ThreadClosure {
 214     virtual void do_thread(Thread* t) {
 215       G1ThreadLocalData::dirty_card_queue(t).reset();
 216     }
 217   } closure;
 218   Threads::threads_do(&closure);
 219 
 220   shared_dirty_card_queue()->reset();
 221 }
 222 
 223 void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
 224   if (!dcq.is_empty()) {
 225     dcq.flush();
 226   }
 227 }
 228 
 229 void G1DirtyCardQueueSet::concatenate_logs() {
 230   // Iterate over all the threads; if we find a partial log, add it to
 231   // the global list of logs.  Temporarily turn off the limit on the number
 232   // of outstanding buffers.
 233   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 234   size_t old_limit = max_completed_buffers();
 235   set_max_completed_buffers(MaxCompletedBuffersUnlimited);
 236 
 237   class ConcatenateThreadLogClosure : public ThreadClosure {
 238     G1DirtyCardQueueSet* _qset;
 239   public:
 240     ConcatenateThreadLogClosure(G1DirtyCardQueueSet* qset) : _qset(qset) {}
 241     virtual void do_thread(Thread* t) {
 242       _qset->concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
 243     }
 244   } closure(this);
 245   Threads::threads_do(&closure);
 246 
 247   concatenate_log(_shared_dirty_card_queue);
 248   set_max_completed_buffers(old_limit);
 249 }
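
Both abandon_logs and concatenate_logs in the second listing drop JavaThreadIteratorWithHandle in favor of a ThreadClosure passed to Threads::threads_do, which visits non-Java threads as well as JavaThreads (hence the njt_iterate patch named in the header, and the widening of handle_zero_index_for_thread from JavaThread* to Thread*). A minimal sketch of that visitor idiom follows; the types here (VisitableThread, ThreadVisitor, visit_all_threads) are illustrative stand-ins, not the HotSpot classes:

  #include <cstdio>
  #include <vector>

  struct VisitableThread {
    int id;
  };

  // Stand-in for HotSpot's ThreadClosure: one virtual hook per thread.
  struct ThreadVisitor {
    virtual void do_thread(VisitableThread* t) = 0;
    virtual ~ThreadVisitor() {}
  };

  // Stand-in for Threads::threads_do: applies the visitor to every thread
  // the registry knows about, whatever kind of thread it is.
  void visit_all_threads(const std::vector<VisitableThread*>& all,
                         ThreadVisitor* v) {
    for (VisitableThread* t : all) {
      v->do_thread(t);
    }
  }

  // Mirrors the local AbandonThreadLogClosure in the second listing.
  struct ResetVisitor : public ThreadVisitor {
    virtual void do_thread(VisitableThread* t) {
      std::printf("reset dirty card queue of thread %d\n", t->id);
    }
  };

  int main() {
    VisitableThread mutator = {1}, service = {2};
    std::vector<VisitableThread*> all;
    all.push_back(&mutator);
    all.push_back(&service);
    ResetVisitor rv;
    visit_all_threads(all, &rv);
    return 0;
  }

Keeping the iteration inside the thread registry means the queue-set code no longer encodes which kinds of threads exist; it only says what to do with each one.
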