/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
{ }

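// Record a reference that is about to be overwritten so the concurrent
// marker still sees it (SATB pre-barrier). Does nothing when the SATB
// queue set is inactive. Java threads use their thread-local SATB queue;
// other threads enqueue on the shared queue under Shared_SATB_Q_lock.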
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  // Nulls should have been already filtered.
  assert(oopDesc::is_oop(pre_val, true), "Error");

  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  }
}

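// Pre-barrier for bulk reference-array stores: enqueue every non-null
// element of the destination range before it is overwritten.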
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

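// Entry points for oop and narrowOop destination arrays. When the
// destination is uninitialized there are no old values to record, so the
// pre-barrier is skipped.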
void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

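// Mark the card at card_index as deferred. Returns false if the card was
// already marked deferred, true otherwise.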
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

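// Tag all cards covering mr as young-gen cards so that the post-barrier
// can filter out stores into the young generation.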
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  memset_with_concurrent_readers(first, g1_young_gen, last - first);
}

#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true);
}
#endif

void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The default value for a clean card on the card table is -1, so we cannot
  // take advantage of the zero_filled parameter.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
  _card_table->clear(mr);
}

G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
  _dcqs(JavaThread::dirty_card_queue_set()),
  _listener()
{
  _listener.set_card_table(this);
}

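// Set up the card table on top of the space reserved by the mapper:
// register the listener that clears cards for newly committed regions and
// compute the byte map bounds from the covered heap.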
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
  mapper->set_mapping_changed_listener(&_listener);

  _byte_map_size = mapper->reserved().byte_size();

  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 1;
  _covered[0] = _whole_heap;

  _byte_map = (jbyte*) mapper->reserved().start();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

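// Slow path of the G1 post-barrier: dirty the card for the updated field
// and enqueue it for concurrent refinement. Young cards are filtered out
// by the caller; already-dirty cards are skipped here.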
void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
  // In the slow path, we know a card is not young
  assert(*byte != g1_young_gen, "slow path invoked without filtering");
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}

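// Dirty and enqueue all cards covering mr, skipping young cards and cards
// that are already dirty.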
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
  if (mr.is_empty()) {
    return;
  }
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  // Skip all consecutive young cards.
  for (; byte <= last_byte && *byte == g1_young_gen; byte++);

  if (byte <= last_byte) {
    OrderAccess::storeload();
    // Enqueue if necessary.
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      for (; byte <= last_byte; byte++) {
        if (*byte == g1_young_gen) {
          continue;
        }
        if (*byte != dirty_card) {
          *byte = dirty_card;
          jt->dirty_card_queue().enqueue(byte);
        }
      }
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      for (; byte <= last_byte; byte++) {
        if (*byte == g1_young_gen) {
          continue;
        }
        if (*byte != dirty_card) {
          *byte = dirty_card;
          _dcqs.shared_dirty_card_queue()->enqueue(byte);
        }
      }
    }
  }
}