src/share/vm/gc_implementation/g1/ptrQueue.hpp

rev 4974 : imported patch conditional-storeload-young.diff


Old version:

  63   // If there is a lock associated with this buffer, this is that lock.
  64   Mutex* _lock;
  65 
  66   PtrQueueSet* qset() { return _qset; }
  67 
  68 public:
  69   // Initialize this queue to contain a null buffer, and be part of the
  70   // given PtrQueueSet.
  71   PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  72   // Release any contained resources.
  73   virtual void flush();
  74   // Calls flush() when destroyed.
  75   ~PtrQueue() { flush(); }
  76 
  77   // Associate a lock with a ptr queue.
  78   void set_lock(Mutex* lock) { _lock = lock; }
  79 
  80   void reset() { if (_buf != NULL) _index = _sz; }
  81 
  82   // Enqueues the given "ptr".




  83   void enqueue(void* ptr) {
  84     if (!_active) return;
  85     else enqueue_known_active(ptr);
  86   }
  87 
  88   // This method is called when we're doing the zero index handling
  89   // and gives a chance to the queues to do any pre-enqueueing
  90   // processing they might want to do on the buffer. It should return
  91   // true if the buffer should be enqueued, or false if enough
  92   // entries were cleared from it so that it can be re-used. It should
  93   // not return false if the buffer is still full (otherwise we can
  94   // get into an infinite loop).
  95   virtual bool should_enqueue_buffer() { return true; }
  96   void handle_zero_index();
  97   void locking_enqueue_completed_buffer(void** buf);
  98 
  99   void enqueue_known_active(void* ptr);
 100 
 101   size_t size() {
 102     assert(_sz >= _index, "Invariant.");




New version (adds an enqueue overload for volatile pointers):

  63   // If there is a lock associated with this buffer, this is that lock.
  64   Mutex* _lock;
  65 
  66   PtrQueueSet* qset() { return _qset; }
  67 
  68 public:
  69   // Initialize this queue to contain a null buffer, and be part of the
  70   // given PtrQueueSet.
  71   PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  72   // Release any contained resources.
  73   virtual void flush();
  74   // Calls flush() when destroyed.
  75   ~PtrQueue() { flush(); }
  76 
  77   // Associate a lock with a ptr queue.
  78   void set_lock(Mutex* lock) { _lock = lock; }
  79 
  80   void reset() { if (_buf != NULL) _index = _sz; }
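       // (Judging from reset() and size(), _index counts down from _sz toward
       // zero: _index == _sz means the buffer is empty and _index == 0 means
       // it is full, which is what the "zero index" handling below deals with.)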
  81 
  82   // Enqueues the given "ptr".
  83   void enqueue(volatile void* ptr) {
  84     enqueue(const_cast<void*>(ptr));
  85   }
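       // (The overload above exists because a "volatile void*" does not convert
       // implicitly to "void*"; casting the qualifier away here saves callers
       // that hold a pointer to a volatile field from casting at each call site.)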
  86 
  87   void enqueue(void* ptr) {
  88     if (!_active) return;
  89     else enqueue_known_active(ptr);
  90   }
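       // (While the queue is inactive, e.g. a SATB queue outside a concurrent
       // marking cycle, enqueue() is deliberately a no-op.)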
  91 
  92   // This method is called when we're doing the zero index handling
  93   // and gives a chance to the queues to do any pre-enqueueing
  94   // processing they might want to do on the buffer. It should return
  95   // true if the buffer should be enqueued, or false if enough
  96   // entries were cleared from it so that it can be re-used. It should
  97   // not return false if the buffer is still full (otherwise we can
  98   // get into an infinite loop).
  99   virtual bool should_enqueue_buffer() { return true; }
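       // A minimal sketch (not part of this file) of a subclass honoring the
       // should_enqueue_buffer() contract described above: filter the buffer,
       // compact the surviving entries toward the top, and only ask for the
       // buffer to be enqueued if little space was reclaimed.  The names
       // "FilteringPtrQueue" and "still_interesting()" are hypothetical; the
       // downward-filled layout (_sz = empty, 0 = full), "oopSize", and subclass
       // access to _buf/_index/_sz are assumed from the surrounding file.
       //
       //   class FilteringPtrQueue : public PtrQueue {
       //   public:
       //     FilteringPtrQueue(PtrQueueSet* qset) : PtrQueue(qset) { }
       //     virtual bool should_enqueue_buffer() {
       //       size_t keep = _sz;                   // next free slot, filling downward
       //       for (size_t i = _sz; i > _index; ) {
       //         i -= oopSize;                      // step down to the next entry
       //         void* entry = _buf[i / oopSize];
       //         if (still_interesting(entry)) {    // hypothetical filter predicate
       //           keep -= oopSize;
       //           _buf[keep / oopSize] = entry;    // compact toward the top
       //         }
       //       }
       //       _index = keep;                       // space below keep is free again
       //       // Re-use the buffer only if filtering freed at least half of it;
       //       // in particular, never return false while the buffer is still full.
       //       return _index < _sz / 2;
       //     }
       //   };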
 100   void handle_zero_index();
 101   void locking_enqueue_completed_buffer(void** buf);
 102 
 103   void enqueue_known_active(void* ptr);
 104 
 105   size_t size() {
 106     assert(_sz >= _index, "Invariant.");