/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
                                                 int max_covered_regions) :
    CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
{
  _kind = G1SATBCT;
}

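// SATB pre-barrier support: record the value about to be overwritten on the
// current thread's SATB queue (or on the shared queue for non-Java threads)
// so that concurrent marking can still visit it.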
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  // Nulls should have been already filtered.
  assert(pre_val->is_oop(true), "Error");

  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  }
}

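// Apply the SATB pre-barrier to every non-null element of an oop array that
// is about to be overwritten.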
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

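// Array-copy variants: no pre-barrier is needed when the destination is known
// to be uninitialized, since there are no previous values to record.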
void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

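// Mark the card at the given index as deferred. Returns true if the caller
// should enqueue the card for later processing; returns false if the card is
// already deferred or covers a young-gen region.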
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // The card has already been marked deferred; nothing more to do.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if (val == g1_young_gen) {
    // The card is for a young gen region; we don't need to keep track of all pointers into young.
    return false;
  }

  // The deferred bit can be set either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

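// Tag all card table entries covering the given region as young so that the
// post-barrier can avoid logging writes into the young generation.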
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  // Below we may use an explicit loop instead of memset() because on
  // certain platforms memset() can give concurrent readers phantom zeros.
  if (UseMemSetInBOT) {
    memset(first, g1_young_gen, last - first);
  } else {
    for (jbyte* i = first; i < last; i++) {
      *i = g1_young_gen;
    }
  }
}

#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true);
}
#endif

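// Invoked when the region-to-space mapper commits additional heap regions:
// clear the card table entries covering the newly committed space.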
void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
  _card_table->clear(mr);
}

G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                               int max_covered_regions) :
  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
  _dcqs(JavaThread::dirty_card_queue_set()),
  _listener()
{
  _kind = G1SATBCTLogging;
  _listener.set_card_table(this);
}

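// Lay the card table over the space reserved by the mapper and register the
// listener that clears cards for newly committed regions.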
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
  mapper->set_mapping_changed_listener(&_listener);

  _byte_map_size = mapper->reserved().byte_size();

  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 1;
  _covered[0] = _whole_heap;

  _byte_map = (jbyte*) mapper->reserved().start();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("G1SATBCardTableLoggingModRefBS::initialize: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

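// G1 post-barrier: if the card covering the updated field is neither young
// nor already dirty, dirty it and log it on the current thread's dirty card
// queue (or on the shared queue for non-Java threads).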
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                     oop new_val,
                                                     bool release) {
  volatile jbyte* byte = byte_for(field);
  if (*byte == g1_young_gen) {
    return;
  }
  // Order the preceding reference store ahead of the card re-read below.
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}

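// Static form of the post-barrier: filter out null stores and stores where
// the field and the new value lie in the same heap region, then delegate to
// write_ref_field_work on the heap's barrier set.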
void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                       oop new_val) {
  uintptr_t field_uint = (uintptr_t)field;
  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
  uintptr_t comb = field_uint ^ new_val_uint;
  comb = comb >> HeapRegion::LogOfHRGrainBytes;
  // No barrier is needed for a store within a single heap region or for a
  // null store.
  if (comb == 0) return;
  if (new_val == NULL) return;
  // Otherwise, log it.
  G1SATBCardTableLoggingModRefBS* g1_bs =
    (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set();
  g1_bs->write_ref_field_work(field, new_val);
}

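// Dirty every card covering the given region. For a whole-heap invalidation
// the cards are dirtied unconditionally; otherwise young cards are skipped
// and newly dirtied cards are logged on a dirty card queue.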
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // Skip all consecutive young cards.
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}