/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeRootSetTable.hpp"
#include "gc/g1/g1CodeCacheRemSet.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/heap.hpp"
#include "memory/iterator.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/stack.inline.hpp"

G1CodeRootSetTable* volatile G1CodeRootSetTable::_purge_list = NULL;

size_t G1CodeRootSetTable::mem_size() {
  return sizeof(G1CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
}

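// Allocate a new table entry for the given nmethod: reuse an entry from the
// free list if one is available, otherwise allocate it directly from the C heap.
// The returned entry is initialized but not yet linked into a bucket.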
G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
  unsigned int hash = compute_hash(nm);
  Entry* entry = (Entry*) new_entry_free_list();
  if (entry == NULL) {
    entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
  }
  entry->set_next(NULL);
  entry->set_hash(hash);
  entry->set_literal(nm);
  return entry;
}

void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
  int index = hash_to_index(e->hash());
  assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");

  if (previous == NULL) {
    set_entry(index, e->next());
  } else {
    previous->set_next(e->next());
  }
  free_entry(e);
}

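// Free both the entries still linked into the buckets and the entries parked
// on the free list; all of them were C-heap allocated in new_entry() above.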
G1CodeRootSetTable::~G1CodeRootSetTable() {
  for (int index = 0; index < table_size(); ++index) {
    for (Entry* e = bucket(index); e != NULL; ) {
      Entry* to_remove = e;
      // read next before freeing.
      e = e->next();
      unlink_entry(to_remove);
      FREE_C_HEAP_ARRAY(char, to_remove);
    }
  }
  assert(number_of_entries() == 0, "should have removed all entries");
  // Each of the entries in new_entry_free_list() has been allocated in
  // G1CodeRootSetTable::new_entry(). We never call the block allocator
  // in BasicHashtable::new_entry().
  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
    FREE_C_HEAP_ARRAY(char, e);
  }
}

bool G1CodeRootSetTable::add(nmethod* nm) {
  if (!contains(nm)) {
    Entry* e = new_entry(nm);
    int index = hash_to_index(e->hash());
    add_entry(index, e);
    return true;
  }
  return false;
}

bool G1CodeRootSetTable::contains(nmethod* nm) {
  int index = hash_to_index(compute_hash(nm));
  for (Entry* e = bucket(index); e != NULL; e = e->next()) {
    if (e->literal() == nm) {
      return true;
    }
  }
  return false;
}

bool G1CodeRootSetTable::remove(nmethod* nm) {
  int index = hash_to_index(compute_hash(nm));
  Entry* previous = NULL;
  for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
    if (e->literal() == nm) {
      remove_entry(e, previous);
      return true;
    }
  }
  return false;
}

void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
  for (int index = 0; index < table_size(); ++index) {
    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
      new_table->add(e->literal());
    }
  }
  new_table->copy_freelist(this);
}

void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
  for (int index = 0; index < table_size(); ++index) {
    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
      blk->do_code_blob(e->literal());
    }
  }
}

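// Remove every entry whose nmethod satisfies the given predicate and return
// the number of entries removed.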
template<typename CB>
int G1CodeRootSetTable::remove_if(CB& should_remove) {
  int num_removed = 0;
  for (int index = 0; index < table_size(); ++index) {
    Entry* previous = NULL;
    Entry* e = bucket(index);
    while (e != NULL) {
      Entry* next = e->next();
      if (should_remove(e->literal())) {
        remove_entry(e, previous);
        ++num_removed;
      } else {
        previous = e;
      }
      e = next;
    }
  }
  return num_removed;
}

G1CodeRootSet::~G1CodeRootSet() {
  delete _table;
}

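// Paired with the release_store()s below: readers that are not under a lock
// (see contains()) must observe a fully constructed table.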
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
  return Atomic::load_acquire(&_table);
}

void G1CodeRootSet::allocate_small_table() {
  G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);

  Atomic::release_store(&_table, temp);
}

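// Prepend the table to the global purge list with a CAS loop, so concurrent
// appenders cannot lose each other's updates.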
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
  for (;;) {
    table->_purge_next = _purge_list;
    G1CodeRootSetTable* old = Atomic::cmpxchg(&_purge_list, table->_purge_next, table);
    if (old == table->_purge_next) {
      break;
    }
  }
}

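// Delete all tables queued on the purge list and reset the list. The list is
// read and cleared without atomics, so this is expected to run only when no
// purge_list_append() calls can be in progress.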
void G1CodeRootSetTable::purge() {
  G1CodeRootSetTable* table = _purge_list;
  _purge_list = NULL;
  while (table != NULL) {
    G1CodeRootSetTable* to_purge = table;
    table = table->_purge_next;
    delete to_purge;
  }
}

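// Replace the small table with a newly allocated large one. The old table is
// put on the purge list instead of being deleted right away, since lock-free
// readers (see contains()) may still be using it.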
void G1CodeRootSet::move_to_large() {
  G1CodeRootSetTable* temp = new G1CodeRootSetTable(LargeSize);

  _table->copy_to(temp);

  G1CodeRootSetTable::purge_list_append(_table);

  Atomic::release_store(&_table, temp);
}

void G1CodeRootSet::purge() {
  G1CodeRootSetTable::purge();
}

size_t G1CodeRootSet::static_mem_size() {
  return G1CodeRootSetTable::static_mem_size();
}

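// Add the nmethod to the set, lazily allocating the small table on first use
// and moving to the large table once the set grows past Threshold entries.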
void G1CodeRootSet::add(nmethod* method) {
  bool added = false;
  if (is_empty()) {
    allocate_small_table();
  }
  added = _table->add(method);
  if (added) {
    if (_length == Threshold) {
      move_to_large();
    }
    ++_length;
  }
  assert(_length == (size_t)_table->number_of_entries(), "sizes should match");
}

bool G1CodeRootSet::remove(nmethod* method) {
  bool removed = false;
  if (_table != NULL) {
    removed = _table->remove(method);
  }
  if (removed) {
    _length--;
    if (_length == 0) {
      clear();
    }
  }
  assert((_length == 0 && _table == NULL) ||
         (_length == (size_t)_table->number_of_entries()), "sizes should match");
  return removed;
}

bool G1CodeRootSet::contains(nmethod* method) {
  G1CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
  if (table != NULL) {
    return table->contains(method);
  }
  return false;
}

void G1CodeRootSet::clear() {
  delete _table;
  _table = NULL;
  _length = 0;
}

size_t G1CodeRootSet::mem_size() {
  return sizeof(*this) + (_table != NULL ? _table->mem_size() : 0);
}

void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
  if (_table != NULL) {
    _table->nmethods_do(blk);
  }
}

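// Predicate for G1CodeRootSet::clean(): returns true if the given nmethod no
// longer contains any oop pointing into the owning region, i.e. the nmethod
// can be removed from that region's code root set.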
class CleanCallback : public StackObj {
  class PointsIntoHRDetectionClosure : public OopClosure {
    HeapRegion* _hr;
   public:
    bool _points_into;
    PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}

    void do_oop(narrowOop* o) {
      do_oop_work(o);
    }

    void do_oop(oop* o) {
      do_oop_work(o);
    }

    template <typename T>
    void do_oop_work(T* p) {
      if (_hr->is_in(RawAccess<>::oop_load(p))) {
        _points_into = true;
      }
    }
  };

  PointsIntoHRDetectionClosure _detector;
  CodeBlobToOopClosure _blobs;

 public:
  CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}

  bool operator() (nmethod* nm) {
    _detector._points_into = false;
    _blobs.do_code_blob(nm);
    return !_detector._points_into;
  }
};

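// Remove all nmethods that no longer have oops pointing into the owning
// region; if the set becomes empty, release the backing table.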
void G1CodeRootSet::clean(HeapRegion* owner) {
  CleanCallback should_clean(owner);
  if (_table != NULL) {
    int removed = _table->remove_if(should_clean);
    assert((size_t)removed <= _length, "impossible");
    _length -= removed;
  }
  if (_length == 0) {
    clear();
  }
}