1 /*
   2  * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "code/relocInfo.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/z/zGlobals.hpp"
  31 #include "gc/z/zHash.inline.hpp"
  32 #include "gc/z/zLock.inline.hpp"
  33 #include "gc/z/zNMethodAllocator.hpp"
  34 #include "gc/z/zNMethodClosure.hpp"
  35 #include "gc/z/zNMethodData.hpp"
  36 #include "gc/z/zNMethodTable.hpp"
  37 #include "gc/z/zNMethodTableEntry.hpp"
  38 #include "gc/z/zNMethodTableIteration.hpp"
  39 #include "gc/z/zOopClosures.inline.hpp"
  40 #include "gc/z/zTask.hpp"
  41 #include "gc/z/zWorkers.hpp"
  42 #include "logging/log.hpp"
  43 #include "memory/allocation.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "runtime/atomic.hpp"
  46 #include "runtime/orderAccess.hpp"
  47 #include "utilities/debug.hpp"
  48 
// Global hash table state. The table uses open addressing with linear
// probing (see first_index()/next_index()). Mutation happens under
// CodeCache_lock, as asserted in the mutating functions below.
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;          // Number of slots (always a power of 2, or 0 before init)
size_t ZNMethodTable::_nregistered = 0;   // Number of live (registered) entries
size_t ZNMethodTable::_nunregistered = 0; // Number of tombstone (unregistered) entries
ZNMethodTableIteration ZNMethodTable::_iteration;
  54 
  55 ZNMethodTableEntry* ZNMethodTable::create(size_t size) {
  56   void* const mem = ZNMethodAllocator::allocate(size * sizeof(ZNMethodTableEntry));
  57   return ::new (mem) ZNMethodTableEntry[size];
  58 }
  59 
  60 void ZNMethodTable::destroy(ZNMethodTableEntry* table) {
  61   ZNMethodAllocator::free(table);
  62 }
  63 
  64 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
  65   assert(is_power_of_2(size), "Invalid size");
  66   const size_t mask = size - 1;
  67   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
  68   return hash & mask;
  69 }
  70 
  71 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
  72   assert(is_power_of_2(size), "Invalid size");
  73   const size_t mask = size - 1;
  74   return (prev_index + 1) & mask;
  75 }
  76 
  77 bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  78   const ZNMethodTableEntry entry(nm);
  79   size_t index = first_index(nm, size);
  80 
  81   for (;;) {
  82     const ZNMethodTableEntry table_entry = table[index];
  83 
  84     if (!table_entry.registered() && !table_entry.unregistered()) {
  85       // Insert new entry
  86       table[index] = entry;
  87       return true;
  88     }
  89 
  90     if (table_entry.registered() && table_entry.method() == nm) {
  91       // Replace existing entry
  92       table[index] = entry;
  93       return false;
  94     }
  95 
  96     index = next_index(index, size);
  97   }
  98 }
  99 
 100 void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
 101   size_t index = first_index(nm, size);
 102 
 103   for (;;) {
 104     const ZNMethodTableEntry table_entry = table[index];
 105     assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
 106 
 107     if (table_entry.registered() && table_entry.method() == nm) {
 108       // Remove entry
 109       table[index] = ZNMethodTableEntry(true /* unregistered */);
 110       return;
 111     }
 112 
 113     index = next_index(index, size);
 114   }
 115 }
 116 
// Rebuilds the table with new_size slots: allocates a new table,
// re-inserts all registered entries, and drops all tombstones (which is
// how pruning works). Caller must hold CodeCache_lock.
void ZNMethodTable::rebuild(size_t new_size) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = ZNMethodTable::create(new_size);

  // Transfer all registered entries. Tombstones are skipped here, which
  // effectively prunes them from the new table.
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry.method());
    }
  }

  // Free old table
  ZNMethodTable::destroy(_table);

  // Install new table. _nregistered is unchanged; all tombstones were
  // dropped above, so the unregistered count resets to zero.
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
 149 
 150 void ZNMethodTable::rebuild_if_needed() {
 151   // The hash table uses linear probing. To avoid wasting memory while
 152   // at the same time maintaining good hash collision behavior we want
 153   // to keep the table occupancy between 30% and 70%. The table always
 154   // grows/shrinks by doubling/halving its size. Pruning of unregistered
 155   // entries is done by rebuilding the table with or without resizing it.
 156   const size_t min_size = 1024;
 157   const size_t shrink_threshold = _size * 0.30;
 158   const size_t prune_threshold = _size * 0.65;
 159   const size_t grow_threshold = _size * 0.70;
 160 
 161   if (_size == 0) {
 162     // Initialize table
 163     rebuild(min_size);
 164   } else if (_nregistered < shrink_threshold && _size > min_size) {
 165     // Shrink table
 166     rebuild(_size / 2);
 167   } else if (_nregistered + _nunregistered > grow_threshold) {
 168     // Prune or grow table
 169     if (_nregistered < prune_threshold) {
 170       // Prune table
 171       rebuild(_size);
 172     } else {
 173       // Grow table
 174       rebuild(_size * 2);
 175     }
 176   }
 177 }
 178 
// Returns the number of nmethods currently registered in the table.
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
 182 
// Returns the number of tombstone (unregistered) entries still occupying
// slots; these are reclaimed the next time the table is rebuilt.
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
 186 
 187 void ZNMethodTable::register_nmethod(nmethod* nm) {
 188   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 189 
 190   // Grow/Shrink/Prune table if needed
 191   rebuild_if_needed();
 192 
 193   // Insert new entry
 194   if (register_entry(_table, _size, nm)) {
 195     // New entry registered. When register_entry() instead returns
 196     // false the nmethod was already in the table so we do not want
 197     // to increase number of registered entries in that case.
 198     _nregistered++;
 199   }
 200 }
 201 
 202 void ZNMethodTable::wait_until_iteration_done() {
 203   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 204 
 205   while (_iteration.in_progress()) {
 206     CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
 207   }
 208 }
 209 
 210 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
 211   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 212 
 213   // Remove entry
 214   unregister_entry(_table, _size, nm);
 215   _nunregistered++;
 216   _nregistered--;
 217 }
 218 
// Starts an iteration over the table. Under CodeCache_lock, switches the
// nmethod allocator into deferred-free mode BEFORE publishing the table
// to the iteration state — this ordering ensures no table memory can be
// freed while the iteration is in progress.
void ZNMethodTable::nmethods_do_begin() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Make sure we don't free data while iterating: frees requested from
  // here on are deferred until nmethods_do_end().
  ZNMethodAllocator::activate_deferred_frees();

  // Prepare iteration over the current table
  _iteration.nmethods_do_begin(_table, _size);
}
 228 
// Ends an iteration started by nmethods_do_begin(). Under CodeCache_lock:
// first tear down the iteration state, then release any frees that were
// deferred while it ran, and finally wake threads blocked in
// wait_until_iteration_done(). The order matters: frees may only be
// processed once the iteration can no longer touch the memory.
void ZNMethodTable::nmethods_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Finish iteration
  _iteration.nmethods_do_end();

  // Process deferred frees
  ZNMethodAllocator::deactivate_and_process_deferred_frees();

  // Notify iteration done
  CodeCache_lock->notify_all();
}
 241 
// Applies cl by delegating to the iteration state set up by
// nmethods_do_begin(). NOTE(review): presumably safe for multiple GC
// worker threads to call concurrently — confirm against
// ZNMethodTableIteration::nmethods_do().
void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
  _iteration.nmethods_do(cl);
}