/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP

#include "gc/shared/cardTableModRefBS.hpp"

class CardTableRS;
class DirtyCardToOopClosure;
class OopsInGenClosure;

// A specialization for the CardTableRS gen rem set.
// Extends the plain card-table barrier set with the state and work
// methods needed for (possibly parallel) dirty-card scanning driven by
// a CardTableRS remembered set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  friend class CardTableRS;

public:
  CardTableModRefBSForCTRS(MemRegion whole_heap);
  ~CardTableModRefBSForCTRS();

  virtual void initialize();

  // Installs the CardTableRS that cooperates with this barrier set.
  // Called by the owner; _rs is not set by the constructor here.
  void set_CTRS(CardTableRS* rs) { _rs = rs; }

  // A card mark must be ordered after the covering store only when CMS
  // is the collector in use.
  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // Whether obj resides in the young generation (defined out of line).
  virtual bool is_in_young(oop obj) const;

private:
  // The remembered set this barrier set serves; see set_CTRS().
  CardTableRS* _rs;

  // *** Support for parallel card scanning.

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal. May be overridden by
  // subtypes.
  bool card_will_be_scanned(jbyte cv);

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  bool card_may_have_been_dirty(jbyte cv);

  // Iterate over the portion of the card-table which covers the given
  // region mr in the given space and apply cl to any dirty sub-regions
  // of mr. Clears the dirty cards as they are processed.
  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
                                                OopsInGenClosure* cl, CardTableRS* ct,
                                                uint n_threads);

  // Work method used to implement non_clean_card_iterate_possibly_parallel()
  // above in the parallel case.
  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                            OopsInGenClosure* cl, CardTableRS* ct,
                                            uint n_threads);

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region. Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte* CardPtr;
  typedef CardPtr* CardArr;
  CardArr* _lowest_non_clean;
  // Per covered region: length of, and first chunk index for, the
  // corresponding _lowest_non_clean array.
  size_t* _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  // Per covered region: tracks resizing of the LNC arrays across
  // collections (volatile: read/written by multiple GC threads —
  // NOTE(review): exact protocol lives in the .cpp).
  volatile int* _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index of the corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  // Inclusive difference of chunk indices of the last and first address.
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address (card address / ParGCCardsPerStrideChunk).
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / ParGCCardsPerStrideChunk;
  }

  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      OopsInGenClosure* cl,
                      CardTableRS* ct,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

};

// Compile-time mapping between this barrier-set type and its
// BarrierSet::Name enumerator (used by the BarrierSet downcast machinery).
template<>
struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
  static const BarrierSet::Name value = BarrierSet::CardTableForRS;
};

// Inverse mapping: from the enumerator back to the concrete type.
template<>
struct BarrierSet::GetType<BarrierSet::CardTableForRS> {
  typedef CardTableModRefBSForCTRS type;
};

#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP