1 /* 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP 26 #define SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP 27 28 #include "gc/shared/modRefBarrierSet.hpp" 29 #include "utilities/align.hpp" 30 31 class CardTable; 32 33 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and 34 // enumerate ref fields that have been modified (since the last 35 // enumeration.) 36 37 // As it currently stands, this barrier is *imprecise*: when a ref field in 38 // an object "o" is modified, the card table entry for the card containing 39 // the head of "o" is dirtied, not necessarily the card containing the 40 // modified field itself. For object arrays, however, the barrier *is* 41 // precise; only the card containing the modified element is dirtied. 42 // Closures used to scan dirty cards should take these 43 // considerations into account. 
// CardTableBarrierSet: a ModRefBarrierSet whose remembered set is a
// CardTable.  A reference update is recorded by dirtying a card table
// entry covering the store (see the precision discussion above the
// class); dirty cards are later scanned to find the modified refs.
class CardTableBarrierSet: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class VMStructs;
protected:

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  // or INCLUDE_JVMCI is being used
  bool _defer_initial_card_mark;
  // The card table acting as this barrier set's remembered set storage.
  CardTable* _card_table;

  // Protected constructor for subclasses that supply their own
  // assembler/C1 barrier implementations and RTTI tag.
  CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
                      BarrierSetC1* barrier_set_c1,
                      CardTable* card_table,
                      const BarrierSet::FakeRtti& fake_rtti);

public:
  CardTableBarrierSet(CardTable* card_table);
  ~CardTableBarrierSet();

  CardTable* card_table() const { return _card_table; }

  virtual void initialize();

  // A bulk write to the region is recorded by invalidating (dirtying)
  // all cards covering it.
  void write_region(MemRegion mr) {
    invalidate(mr);
  }

  void write_ref_array_work(MemRegion mr);

public:
  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.
  template <DecoratorSet decorators, typename T>
  void write_ref_field_post(T* field, oop newVal);

  // Dirty the cards covering mr.
  virtual void invalidate(MemRegion mr);

  // ReduceInitialCardMarks
  void initialize_deferred_card_mark_barriers();

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  void flush_deferred_card_mark_barrier(JavaThread* thread);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to on_slowpath_allocation_exit() below. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular barrier type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const;

  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
  virtual void on_thread_detach(JavaThread* thread);

  // Flushing the deferred card mark makes the thread's heap state
  // walkable (parsable) again.
  virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }

  virtual void print_on(outputStream* st) const;

  // Access API hook: inherits the default ModRefBarrierSet access
  // barrier behavior unchanged.
  template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};

// Map the CardTableBarrierSet type to its BarrierSet::Name enumerator...
template<>
struct BarrierSet::GetName<CardTableBarrierSet> {
  static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
};

// ...and the enumerator back to the type, for FakeRtti-based dispatch.
template<>
struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
  typedef ::CardTableBarrierSet type;
};

#endif // SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP