1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  26 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  27 
  28 #include "gc/g1/g1FreeIdSet.hpp"
  29 #include "gc/shared/ptrQueue.hpp"
  30 #include "memory/allocation.hpp"
  31 
  32 class G1CardTableEntryClosure;
  33 class G1DirtyCardQueueSet;
  34 class G1RedirtyCardsQueueSet;
  35 class Thread;
  36 class Monitor;
  37 
// A ptrQueue whose elements are (possibly stale) pointers to card table entries.
class G1DirtyCardQueue: public PtrQueue {
protected:
  // Invoked when this queue's current buffer is full.  NOTE(review): the
  // handoff policy lives in the .cpp file -- presumably the buffer is passed
  // to the associated qset for processing or enqueueing; confirm there.
  virtual void handle_completed_buffer();

public:
  G1DirtyCardQueue(G1DirtyCardQueueSet* qset);

  // Flush before destroying; queue may be used to capture pending work while
  // doing something else, with auto-flush on completion.
  ~G1DirtyCardQueue();

  // Process queue entries and release resources.
  void flush() { flush_impl(); }

  // The qset this queue was constructed with, downcast to the G1 type.
  inline G1DirtyCardQueueSet* dirty_card_qset() const;

  // Compiler support.
  // Field offsets/widths exposed so compiler-generated code (e.g. the
  // write-barrier path) can manipulate the queue directly.
  static ByteSize byte_offset_of_index() {
    return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_index;

  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_buf;

};
  67 
// The shared set of completed dirty card buffers, together with the policy
// state controlling when mutator and refinement threads process them.
class G1DirtyCardQueueSet: public PtrQueueSet {
  Monitor* _cbl_mon;  // Protects the list and count members.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;

  // Number of actual cards in the list of completed buffers.
  volatile size_t _num_cards;

  // Threshold used by notify_if_necessary() to wake the consumer; see the
  // get/set accessors below for the contract.
  size_t _process_cards_threshold;
  volatile bool _process_completed_buffers;

  // Discard all completed buffers without processing them.
  void abandon_completed_buffers();

  // Apply the closure to the elements of "node" from its index to
  // buffer_size.  If all closure applications return true, then
  // returns true.  Stops processing after the first closure
  // application that returns false, and returns false from this
  // function.  The node's index is updated to exclude the processed
  // elements, e.g. up to the element for which the closure returned
  // false, or one past the last element if the closure always
  // returned true.
  bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                               BufferNode* node,
                               uint worker_i = 0);

  // If there are more than stop_at completed buffers, pop one, apply
  // the specified closure to its active elements, and return true.
  // Otherwise return false.
  //
  // A completely processed buffer is freed.  However, if a closure
  // invocation returns false, processing is stopped and the partially
  // processed buffer (with its index updated to exclude the processed
  // elements, e.g. up to the element for which the closure returned
  // false) is returned to the completed buffer set.
  //
  // If during_pause is true, stop_at must be zero, and the closure
  // must never return false.
  bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                         uint worker_i,
                                         size_t stop_at,
                                         bool during_pause);

  // Have a mutator thread process the given buffer.  NOTE(review):
  // return-value semantics are defined in the .cpp file -- presumably true
  // iff the buffer was fully processed; confirm there.
  bool mut_process_buffer(BufferNode* node);

  // If the queue contains more cards than configured here, the
  // mutator must start doing some of the concurrent refinement work.
  size_t _max_cards;
  size_t _max_cards_padding;
  static const size_t MaxCardsUnlimited = SIZE_MAX;

  // Pool of claimable ids for parallel card-processing work; see num_par_ids().
  G1FreeIdSet _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

public:
  G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator);
  ~G1DirtyCardQueueSet();

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static uint num_par_ids();

  // Handle t's dirty card queue when its buffer index reaches zero (buffer
  // exhausted).  NOTE(review): behavior defined in the .cpp file -- confirm.
  static void handle_zero_index_for_thread(Thread* t);

  // Either process the entire buffer and return true, or enqueue the
  // buffer and return false.  If the buffer is completely processed,
  // it can be reused in place.
  bool process_or_enqueue_completed_buffer(BufferNode* node);

  virtual void enqueue_completed_buffer(BufferNode* node);

  // If the number of completed buffers is > stop_at, then remove and
  // return a completed buffer from the list.  Otherwise, return NULL.
  BufferNode* get_completed_buffer(size_t stop_at = 0);

  // The number of cards in completed buffers. Read without synchronization.
  size_t num_cards() const { return _num_cards; }

  // Verify that _num_cards is equal to the sum of actual cards
  // in the completed buffers.
  void verify_num_cards() const NOT_DEBUG_RETURN;

  bool process_completed_buffers() { return _process_completed_buffers; }
  void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }

  // Get/Set the number of cards that triggers log processing.
  // Log processing should be done when the number of cards exceeds the
  // threshold.
  void set_process_cards_threshold(size_t sz) {
    _process_cards_threshold = sz;
  }
  size_t process_cards_threshold() const {
    return _process_cards_threshold;
  }
  static const size_t ProcessCardsThresholdNever = SIZE_MAX;

  // Notify the consumer if the number of buffers crossed the threshold
  void notify_if_necessary();

  // Transfer the completed buffers of src into this set.  NOTE(review):
  // exact ownership/accounting semantics are in the .cpp file -- confirm.
  void merge_bufferlists(G1RedirtyCardsQueueSet* src);

  // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
  // completed buffers remaining.
  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);

  // Apply the given closure to all completed buffers. The given closure's do_card_ptr
  // must never return false. Must only be called during GC.
  bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);

  // If a full collection is happening, reset partial logs, and release
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();

  // Get/Set the card count above which mutators must help with refinement;
  // see the comment on _max_cards.
  void set_max_cards(size_t m) {
    _max_cards = m;
  }
  size_t max_cards() const {
    return _max_cards;
  }

  void set_max_cards_padding(size_t padding) {
    _max_cards_padding = padding;
  }
  size_t max_cards_padding() const {
    return _max_cards_padding;
  }

  // Statistics accessors for the per-source processed-buffer counters.
  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};
 209 
 210 inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const {
 211   return static_cast<G1DirtyCardQueueSet*>(qset());
 212 }
 213 
 214 #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP