/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
                                                 int max_covered_regions) :
    CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
{
  _kind = G1SATBCT;
}

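// SATB pre-barrier slow path: record the previous value of a reference field
// so that concurrent marking sees a snapshot-at-the-beginning of the object
// graph. Java threads push onto their thread-local SATB queue; other threads
// use the shared queue under Shared_SATB_Q_lock.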
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  // Nulls should already have been filtered.
  assert(pre_val->is_oop(true), "Error");

  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  }
}

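// Pre-barrier for bulk reference-array updates: enqueue the previous value of
// every non-null element in [dst, dst + count) before the destination range
// is overwritten.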
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

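// Atomically mark the card at card_index as deferred. Returns false if the
// card is already deferred or covers a young gen region (those need no
// remembered set updates); otherwise installs the deferred bit with a CAS
// (for clean or claimed cards) and returns true.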
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if (val == g1_young_gen) {
    // The card is for a young gen region. We don't need to keep track of all
    // pointers into young gen regions.
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

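// Tag every card covering mr with the g1_young_gen value so that the write
// barrier can cheaply filter out stores into young regions.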
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  // Below we may use an explicit loop instead of memset() because on
  // certain platforms memset() can give concurrent readers phantom zeros.
  if (UseMemSetInBOT) {
    memset(first, g1_young_gen, last - first);
  } else {
    for (jbyte* i = first; i < last; i++) {
      *i = g1_young_gen;
    }
  }
}

#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true);
}
#endif

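// The "logging" variant of the barrier set additionally records dirtied cards
// on dirty card queues (thread-local for Java threads, shared otherwise) so
// that concurrent refinement threads can process them.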
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                               int max_covered_regions) :
  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
  _dcqs(JavaThread::dirty_card_queue_set())
{
  _kind = G1SATBCTLogging;
}

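// Post-barrier slow path: dirty the card covering field and log it on a dirty
// card queue, unless the card is in a young gen region or is already dirty.
// The storeload fence orders the preceding store of the reference ahead of
// the reload of the card value.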
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                     oop new_val,
                                                     bool release) {
  volatile jbyte* byte = byte_for(field);
  if (*byte == g1_young_gen) {
    return;
  }
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}

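// Filtered static entry point: the barrier is skipped when the field and the
// new value lie in the same heap region (the XOR of the two addresses has no
// bits at or above the region-size granularity) or when the new value is
// NULL; otherwise the card is dirtied and logged via the heap's barrier set.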
void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                       oop new_val) {
  uintptr_t field_uint = (uintptr_t)field;
  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
  uintptr_t comb = field_uint ^ new_val_uint;
  comb = comb >> HeapRegion::LogOfHRGrainBytes;
  if (comb == 0) return;
  if (new_val == NULL) return;
  // Otherwise, log it.
  G1SATBCardTableLoggingModRefBS* g1_bs =
    (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set();
  g1_bs->write_ref_field_work(field, new_val);
}

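// Dirty all cards covering mr. For a whole-heap invalidation every card is
// dirtied unconditionally; otherwise cards covering young gen regions are
// skipped and newly dirtied cards are logged on a dirty card queue.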
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // Skip all consecutive young cards.
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}