1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  27 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  28 #include "gc_implementation/g1/heapRegion.hpp"
  29 #include "gc_implementation/g1/satbQueue.hpp"
  30 #include "runtime/atomic.inline.hpp"
  31 #include "runtime/mutexLocker.hpp"
  32 #include "runtime/orderAccess.inline.hpp"
  33 #include "runtime/thread.inline.hpp"
  34 
  35 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
  36                                                  int max_covered_regions) :
  37     CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
  38 {
  39   _kind = G1SATBCT;
  40 }
  41 
  42 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  43   // Nulls should have been already filtered.
  44   assert(pre_val->is_oop(true), "Error");
  45 
  46   if (!JavaThread::satb_mark_queue_set().is_active()) return;
  47   Thread* thr = Thread::current();
  48   if (thr->is_Java_thread()) {
  49     JavaThread* jt = (JavaThread*)thr;
  50     jt->satb_mark_queue().enqueue(pre_val);
  51   } else {
  52     MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
  53     JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  54   }
  55 }
  56 
  57 template <class T> void
  58 G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  59   if (!JavaThread::satb_mark_queue_set().is_active()) return;
  60   T* elem_ptr = dst;
  61   for (int i = 0; i < count; i++, elem_ptr++) {
  62     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
  63     if (!oopDesc::is_null(heap_oop)) {
  64       enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
  65     }
  66   }
  67 }
  68 
  69 void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  70   if (!dest_uninitialized) {
  71     write_ref_array_pre_work(dst, count);
  72   }
  73 }
  74 void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  75   if (!dest_uninitialized) {
  76     write_ref_array_pre_work(dst, count);
  77   }
  78 }
  79 
// Attempts to mark the card at card_index as "deferred". Returns false
// when no further processing is needed (the card already carries the
// deferred bit, or it covers a young-gen region); returns true otherwise.
// The card byte is updated with a single compare-and-swap; see the note
// at the cmpxchg below about the (deliberate) absence of a retry loop.
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if  (val == g1_young_gen) {
    // the card is for a young gen region. We don't need to keep track of all pointers into young
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    // Single-shot CAS, not retried on failure. NOTE(review): presumably a
    // concurrent change to this card (e.g. another thread deferring or
    // dirtying it) makes a retry unnecessary — confirm against callers.
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}
 106 
 107 void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
 108   jbyte *const first = byte_for(mr.start());
 109   jbyte *const last = byte_after(mr.last());
 110 
 111   // Below we may use an explicit loop instead of memset() because on
 112   // certain platforms memset() can give concurrent readers phantom zeros.
 113   if (UseMemSetInBOT) {
 114     memset(first, g1_young_gen, last - first);
 115   } else {
 116     for (jbyte* i = first; i < last; i++) {
 117       *i = g1_young_gen;
 118     }
 119   }
 120 }
 121 
 122 #ifndef PRODUCT
 123 void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
 124   verify_region(mr, g1_young_gen,  true);
 125 }
 126 #endif
 127 
 128 void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 129   // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
 130   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
 131   _card_table->clear(mr);
 132 }
 133 
 134 G1SATBCardTableLoggingModRefBS::
 135 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
 136                                int max_covered_regions) :
 137   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
 138   _dcqs(JavaThread::dirty_card_queue_set()),
 139   _listener()
 140 {
 141   _kind = G1SATBCTLogging;
 142   _listener.set_card_table(this);
 143 }
 144 
 145 void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
 146   mapper->set_mapping_changed_listener(&_listener);
 147 
 148   _byte_map_size = mapper->reserved().byte_size();
 149 
 150   _guard_index = cards_required(_whole_heap.word_size()) - 1;
 151   _last_valid_index = _guard_index - 1;
 152 
 153   HeapWord* low_bound  = _whole_heap.start();
 154   HeapWord* high_bound = _whole_heap.end();
 155 
 156   _cur_covered_regions = 1;
 157   _covered[0] = _whole_heap;
 158 
 159   _byte_map = (jbyte*) mapper->reserved().start();
 160   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
 161   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
 162   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 163 
 164   if (TraceCardTableModRefBS) {
 165     gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
 166     gclog_or_tty->print_cr("  "
 167                   "  &_byte_map[0]: " INTPTR_FORMAT
 168                   "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
 169                   p2i(&_byte_map[0]),
 170                   p2i(&_byte_map[_last_valid_index]));
 171     gclog_or_tty->print_cr("  "
 172                   "  byte_map_base: " INTPTR_FORMAT,
 173                   p2i(byte_map_base));
 174   }
 175 }
 176 
// Post-barrier for a reference store into 'field': dirty the covering
// card and log it on a dirty-card queue, unless the card is young-gen
// (young regions need no per-card tracking; cf. mark_card_deferred) or is
// already dirty. 'release' is part of the interface but unused on this
// path. 'new_val' is likewise unused here — filtering on it happens in
// write_ref_field_static.
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                     oop new_val,
                                                     bool release) {
  volatile jbyte* byte = byte_for(field);
  if (*byte == g1_young_gen) {
    return;
  }
  // StoreLoad fence: order the preceding reference store ahead of the
  // card re-read below.
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      // Java threads log to their own queue without locking.
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      // Non-Java threads share one queue protected by a lock.
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}
 199 
 200 void
 201 G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
 202                                                        oop new_val) {
 203   uintptr_t field_uint = (uintptr_t)field;
 204   uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
 205   uintptr_t comb = field_uint ^ new_val_uint;
 206   comb = comb >> HeapRegion::LogOfHRGrainBytes;
 207   if (comb == 0) return;
 208   if (new_val == NULL) return;
 209   // Otherwise, log it.
 210   G1SATBCardTableLoggingModRefBS* g1_bs =
 211     (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set();
 212   g1_bs->write_ref_field_work(field, new_val);
 213 }
 214 
// Dirties (and, unless whole_heap, logs) every card covering 'mr'.
// For whole_heap the cards are only written, not enqueued. Otherwise
// each non-young, not-already-dirty card is dirtied and enqueued for
// refinement — the same per-card discipline as write_ref_field_work,
// applied over a range.
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    // Plain dirtying pass; nothing is logged in this mode.
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // skip all consecutive young cards
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      // StoreLoad fence before re-reading card values, mirroring
      // write_ref_field_work.
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        // Java thread: lock-free, per-thread dirty card queue.
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        // Non-Java thread: take the shared-queue lock once for the
        // whole range rather than per card.
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}