/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
#define SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP

#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"

class FreeIdSet;

// A closure class for processing card table entries. Note that we don't
// require these closure objects to be stack-allocated.
class CardTableEntryClosure: public CHeapObj<mtGC> {
public:
  // Process the card whose card table entry is "card_ptr". If returns
  // "false", terminate the iteration early.
  // "worker_i" identifies the worker performing the processing; it defaults
  // to 0 for single-threaded callers.
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
};

// A PtrQueue whose elements are (possibly stale) pointers to card table
// entries (jbyte*), as processed by CardTableEntryClosure::do_card_ptr above.
class DirtyCardQueue: public PtrQueue {
public:
  DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
    // Dirty card queues are always active, so we create them with their
    // active field set to true.
    PtrQueue(qset_, perm, true /* active */) { }

  // Flush before destroying; queue may be used to capture pending work while
  // doing something else, with auto-flush on completion.
  ~DirtyCardQueue() { if (!is_permanent()) flush(); }

  // Process queue entries and release resources.
  void flush() { flush_impl(); }

  // Apply the closure to all elements, and reset the index to make the
  // buffer empty. If a closure application returns "false", return
  // "false" immediately, halting the iteration. If "consume" is true,
  // deletes processed entries from logs.
  bool apply_closure(CardTableEntryClosure* cl,
                     bool consume = true,
                     uint worker_i = 0);

  // Apply the closure to all elements of "buf", down to "index"
  // (inclusive.) If returns "false", then a closure application returned
  // "false", and we return immediately. If "consume" is true, entries are
  // set to NULL as they are processed, so they will not be processed again
  // later.
  static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                      void** buf, size_t index, size_t sz,
                                      bool consume = true,
                                      uint worker_i = 0);
  // Raw access to the underlying buffer and current index. NOTE(review):
  // presumably used to hand a buffer off without copying (e.g. by the queue
  // set / refinement code) — confirm against the callers in the .cpp.
  void **get_buf() { return _buf;}
  void set_buf(void **buf) {_buf = buf;}
  size_t get_index() { return _index;}
  // Detach this queue from its buffer by clearing the pointer, size, and
  // index. Does NOT free the old buffer; the caller that took it via
  // get_buf() is responsible for it.
  void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
};



class DirtyCardQueueSet: public PtrQueueSet {
  // Queue shared by threads that do not have a private dirty card queue;
  // exposed via shared_dirty_card_queue() below.
  DirtyCardQueue _shared_dirty_card_queue;

  // Override.
  bool mut_process_buffer(void** buf);

  // Protected by the _cbl_mon.
  FreeIdSet* _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

  // Current buffer node used for parallel iteration.
  BufferNode* volatile _cur_par_buffer_node;

  // Set from the first argument of initialize(). NOTE(review): the effect of
  // this flag is not visible in this header — see the implementation file.
  bool _should_do_processing;
public:
  DirtyCardQueueSet(bool notify_when_complete = true);

  void initialize(bool should_do_processing, Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  Mutex* lock, PtrQueueSet* fl_owner = NULL);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static uint num_par_ids();

  static void handle_zero_index_for_thread(JavaThread* t);

  // Apply the given closure to all entries in all currently-active buffers.
  // This should only be applied at a safepoint. (Currently must not be called
  // in parallel; this should change in the future.) If "consume" is true,
  // processed entries are discarded.
  void iterate_closure_all_threads(CardTableEntryClosure* cl,
                                   bool consume = true,
                                   uint worker_i = 0);

  // If there exists some completed buffer, pop it, then apply the
  // specified closure to all its elements, nulling out those elements
  // processed. If all elements are processed, returns "true". If no
  // completed buffers exist, returns false. If a completed buffer exists,
  // but is only partially completed before a "yield" happens, the
  // partially completed buffer (with its processed elements set to NULL)
  // is returned to the completed buffer set, and this call returns false.
  bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                         uint worker_i = 0,
                                         int stop_at = 0,
                                         bool during_pause = false);

  // Helper routine for the above.
  bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
                                                uint worker_i,
                                                BufferNode* nd);

  BufferNode* get_completed_buffer(int stop_at);

  // Applies the current closure to all completed buffers,
  // non-consumptively.
  void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);

  // Reset the parallel-iteration cursor to the head of the completed-buffer
  // list, in preparation for par_apply_closure_to_all_completed_buffers().
  void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
  // Applies the current closure to all completed buffers, non-consumptively.
  // Parallel version.
  void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);

  DirtyCardQueue* shared_dirty_card_queue() {
    return &_shared_dirty_card_queue;
  }

  // Deallocate any completed log buffers
  void clear();

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();
  // Reset the completed-buffer count (inherited from PtrQueueSet) to zero.
  void clear_n_completed_buffers() { _n_completed_buffers = 0;}

  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};

#endif // SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP