1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1BarrierSet.hpp"
  27 #include "gc/g1/g1BSCodeGen.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/c1G1BSCodeGen.hpp"
  30 #include "gc/g1/c2G1BSCodeGen.hpp"
  31 #include "gc/g1/heapRegion.hpp"
  32 #include "gc/g1/satbMarkQueue.hpp"
  33 #include "logging/log.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/interfaceSupport.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/safepoint.hpp"
  38 #include "runtime/thread.inline.hpp"
  39 
// Set of all SATB queues. Process-wide singleton; handed out via
// G1BarrierSet::satb_mark_queue_set().
static SATBMarkQueueSet _satb_mark_queue_set;
// Set of all dirty card queues. Process-wide singleton; handed out via
// G1BarrierSet::dirty_card_queue_set().
static DirtyCardQueueSet _dirty_card_queue_set;
  44 
// SATB marking queue support.
// Accessor for the file-static global; returns the single process-wide
// SATB mark queue set.
SATBMarkQueueSet& G1BarrierSet::satb_mark_queue_set() {
  return _satb_mark_queue_set;
}
  49 
// Dirty card queue support.
// Accessor for the file-static global; returns the single process-wide
// dirty card queue set.
DirtyCardQueueSet& G1BarrierSet::dirty_card_queue_set() {
  return _dirty_card_queue_set;
}
  54 
// Constructs the G1 barrier set over the given card table, tagging the
// BarrierSet runtime-type info as G1BarrierSet so is_a()-style queries
// identify it correctly.
G1BarrierSet::G1BarrierSet(CardTable* card_table) :
  CardTableModRefBS(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet))
{ }
  58 
  59 void G1BarrierSet::satb_enqueue(oop pre_val) {
  60   // Nulls should have been already filtered.
  61   assert(pre_val->is_oop(true), "Error");
  62 
  63   if (!_satb_mark_queue_set.is_active()) return;
  64   Thread* thr = Thread::current();
  65   if (thr->is_Java_thread()) {
  66     JavaThread* jt = (JavaThread*)thr;
  67     jt->satb_mark_queue().enqueue(pre_val);
  68   } else {
  69     MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
  70     _satb_mark_queue_set.shared_satb_queue()->enqueue(pre_val);
  71   }
  72 }
  73 
  74 void G1BarrierSet::card_enqueue(volatile jbyte* card) {
  75   Thread* thr = Thread::current();
  76   if (thr->is_Java_thread()) {
  77     JavaThread* jt = (JavaThread*)thr;
  78     jt->dirty_card_queue().enqueue(card);
  79   } else {
  80     MutexLockerEx x(Shared_DirtyCardQ_lock,
  81                     Mutex::_no_safepoint_check_flag);
  82     dirty_card_queue_set().shared_dirty_card_queue()->enqueue(card);
  83   }
  84 }
  85 
  86 BarrierSetCodeGen *G1BarrierSet::make_code_gen() {
  87   return new G1BSCodeGen();
  88 }
  89 
// Factory for this barrier set's C1 (client compiler) code generator.
C1BarrierSetCodeGen* G1BarrierSet::make_c1_code_gen() {
  return new C1G1BSCodeGen();
}
  93 
// Factory for this barrier set's C2 (server compiler) code generator.
C2BarrierSetCodeGen* G1BarrierSet::make_c2_code_gen() {
  return new C2G1BSCodeGen();
}
  97 
  98 template <class T> void
  99 G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
 100   if (!_satb_mark_queue_set.is_active()) return;
 101   T* elem_ptr = dst;
 102   for (int i = 0; i < count; i++, elem_ptr++) {
 103     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
 104     if (!oopDesc::is_null(heap_oop)) {
 105       satb_enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
 106     }
 107   }
 108 }
 109 
 110 void G1BarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
 111   if (!dest_uninitialized) {
 112     write_ref_array_pre_work(dst, count);
 113   }
 114 }
 115 
 116 void G1BarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
 117   if (!dest_uninitialized) {
 118     write_ref_array_pre_work(dst, count);
 119   }
 120 }
 121 
// Dirty every card covering mr and enqueue each newly-dirtied card for
// refinement, skipping cards marked with the young value. Cards already
// dirty are not re-enqueued.
void G1BarrierSet::invalidate(MemRegion mr) {
  volatile jbyte* byte = _card_table->byte_for(mr.start());
  jbyte* last_byte = _card_table->byte_for(mr.last());
  Thread* thr = Thread::current();
  // Skip all consecutive young cards at the start of the range.
  for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);

  if (byte <= last_byte) {
    // NOTE(review): StoreLoad fence before re-reading the remaining card
    // values — presumably orders prior stores against the card reads
    // below w.r.t. concurrent young-card marking; confirm against the
    // matching barrier on the card-marking side.
    OrderAccess::storeload();
    // Enqueue if necessary.
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      for (; byte <= last_byte; byte++) {
        // Young cards interior to the range are skipped too.
        if (*byte == G1CardTable::g1_young_card_val()) {
          continue;
        }
        // Only dirty-and-enqueue cards that are not already dirty.
        if (*byte != G1CardTable::dirty_card_val()) {
          *byte = G1CardTable::dirty_card_val();
          jt->dirty_card_queue().enqueue(byte);
        }
      }
    } else {
      // Non-Java threads share a single queue, guarded by its lock.
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      for (; byte <= last_byte; byte++) {
        if (*byte == G1CardTable::g1_young_card_val()) {
          continue;
        }
        if (*byte != G1CardTable::dirty_card_val()) {
          *byte = G1CardTable::dirty_card_val();
          _dirty_card_queue_set.shared_dirty_card_queue()->enqueue(byte);
        }
      }
    }
  }
}
 158 
 159 void G1BarrierSet::write_ref_nmethod_post(nmethod* nm, oop obj) {
 160   if (obj != NULL) {
 161     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 162     HeapRegion* hr = g1h->heap_region_containing(obj);
 163     hr->add_strong_code_root(nm);
 164   }
 165 }
 166 
 167 class G1EnsureLastRefToRegion : public OopClosure {
 168   G1CollectedHeap* _g1h;
 169   HeapRegion* _hr;
 170   oop* _dst;
 171 
 172   bool _value;
 173 public:
 174   G1EnsureLastRefToRegion(G1CollectedHeap* g1h, HeapRegion* hr, oop* dst) :
 175     _g1h(g1h), _hr(hr), _dst(dst), _value(true) {}
 176 
 177   void do_oop(oop* p) {
 178     if (_value && p != _dst) {
 179       oop obj = oopDesc::load_heap_oop(p);
 180       if (obj != NULL) {
 181         HeapRegion* hr = _g1h->heap_region_containing(obj);
 182         if (hr == _hr) {
 183           // Another reference to the same region.
 184           _value = false;
 185         }
 186       }
 187     }
 188   }
 189   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 190   bool value() const        { return _value;  }
 191 };
 192 
 193 void G1BarrierSet::write_ref_nmethod_pre(nmethod* nm, ptrdiff_t offset) {
 194   oop* dst = (oop*)(intptr_t(nm) + offset);
 195   oop obj = oopDesc::load_heap_oop(dst);
 196   if (obj != NULL) {
 197     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 198     HeapRegion* hr = g1h->heap_region_containing(obj);
 199     G1EnsureLastRefToRegion ensure_last_ref(g1h, hr, dst);
 200     nm->oops_do(&ensure_last_ref);
 201     if (ensure_last_ref.value()) {
 202       // Last reference to this region, remove the nmethod from the rset.
 203       hr->remove_strong_code_root(nm);
 204     }
 205   }
 206 }
 207 
 208 void G1BarrierSet::on_add_thread(JavaThread* thread) {
 209   assert(!SafepointSynchronize::is_at_safepoint(),
 210          "we should not be at a safepoint");
 211 
 212   SATBMarkQueue& satb_queue = thread->satb_mark_queue();
 213   SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
 214   // The SATB queue should have been constructed with its active
 215   // field set to false.
 216   assert(!satb_queue.is_active(), "SATB queue should not be active");
 217   assert(satb_queue.is_empty(), "SATB queue should be empty");
 218   // If we are creating the thread during a marking cycle, we should
 219   // set the active field of the SATB queue to true.
 220   if (satb_queue_set.is_active()) {
 221     satb_queue.set_active(true);
 222   }
 223 
 224   DirtyCardQueue& dirty_queue = thread->dirty_card_queue();
 225   // The dirty card queue should have been constructed with its
 226   // active field set to true.
 227   assert(dirty_queue.is_active(), "dirty card queue should be active");
 228 }
 229 
// Thread-detach hook: let the superclass do its teardown, then flush
// the departing thread's pending SATB and dirty-card entries into the
// global queue sets so no barrier records are lost.
void G1BarrierSet::on_destroy_thread(JavaThread* thread) {
  CardTableModRefBS::on_destroy_thread(thread);
  thread->satb_mark_queue().flush();
  thread->dirty_card_queue().flush();
}
 235 
 236 bool G1BarrierSet::is_referent_field(oop base, ptrdiff_t offset) {
 237   Klass* k;
 238   return offset == java_lang_ref_Reference::referent_offset &&
 239     (k = base->klass()) &&
 240     InstanceKlass::cast(k)->reference_type() != REF_NONE;
 241 }
 242 
// G1 write-barrier pre: executed before a pointer store.
// Leaf runtime entry (JRT_LEAF) that logs the original field value into
// the calling thread's SATB queue. NOTE(review): presumably invoked
// from the compiled barrier slow path — confirm against the code
// generators.
JRT_LEAF(void, G1BarrierSet::g1_wb_pre(oopDesc* orig, JavaThread *thread))
  if (orig == NULL) {
    // Callers are expected to have filtered NULLs before calling in.
    assert(false, "should be optimized out");
    return;
  }
  assert(orig->is_oop(true /* ignore mark word */), "Error");
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END
 253 
// G1 write-barrier post: executed after a pointer store.
// Leaf runtime entry (JRT_LEAF) that logs the card covering the updated
// location into the calling thread's dirty card queue.
JRT_LEAF(void, G1BarrierSet::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END