/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"


// This hashtable is implemented as an open hash table with a fixed number of buckets.

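// A minimal sketch (not HotSpot code; the names are hypothetical) of the scheme: a fixed-size
// array of buckets, each holding a singly-linked chain of entries, with an entry's bucket chosen
// as its hash modulo the table size.
//
//   struct SketchEntry { unsigned hash; SketchEntry* next; };
//   struct SketchTable {
//     int           table_size;
//     SketchEntry** buckets;                                   // one chain head per bucket
//     int index_for(unsigned hash) const { return (int)(hash % (unsigned)table_size); }
//     void add(SketchEntry* e) {                               // insert at the head of the chain
//       int i = index_for(e->hash);
//       e->next    = buckets[i];
//       buckets[i] = e;
//     }
//   };
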
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
  BasicHashtableEntry<F>* entry = NULL;
  if (_free_list != NULL) {
    entry = _free_list;
    _free_list = _free_list->next();
  }
  return entry;
}

// HashtableEntry objects are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_int(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}
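
// Worked example of the block sizing in BasicHashtable<F>::new_entry above (the values are
// hypothetical): with _table_size = 1009, _number_of_entries = 2000 and _entry_size = 16,
// block_size = MIN2(512, MAX2(504, 2000)) = 512 and len = 16 * 512 = 8192 bytes, which is
// already a power of two, so a single C-heap allocation serves the next 512 new_entry() calls.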


template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry;

  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  entry->set_literal(obj);
  return entry;
}

// Check to see if the hashtable is unbalanced.  The caller sets a flag to
// rehash at the next safepoint.  If this bucket is 60 times longer than the
// expected average bucket length, the hashtable is unbalanced.
// This is a somewhat arbitrary heuristic, but if one bucket gets to
// rehash_count, which is currently 100, there's probably something wrong.

template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}
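
// Worked example of the threshold above (hypothetical numbers): with 1000 entries in 500 buckets
// the expected average chain length is 2, so with rehash_multiple = 60 the limit is 120; a caller
// that finds a single chain of more than 120 entries gets 'true' back and flags a rehash for the
// next safepoint.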

template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

// Create a new table and, using the alternate hash code, populate the new table
// with the existing elements.  This can be used to change the hash code
// and could in the future change the size of the table.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and relink each entry into the new table.
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use the alternate hashing algorithm on the symbol in the first table.
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size).
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.  The shared bit is the LSB in the _next field, so
      // walking the hashtable past these entries requires a
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // Give the new table the free list as well.
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy the memory used by the buckets in the hashtable.  The memory
  // for the elements has been reused in the new table and is not
  // destroyed.  Reusing that memory when resizing the SystemDictionary
  // avoids a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}
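
// Sketch of the intended call pattern, loosely modeled on SymbolTable::rehash_table() (the names
// and details here are illustrative, not a verbatim copy of any caller):
//
//   assert(SafepointSynchronize::is_at_safepoint(), "rehashing happens at a safepoint");
//   SymbolTable* new_table = new SymbolTable();   // same bucket count, empty buckets
//   the_table()->move_to(new_table);              // re-hash every entry with the fresh seed
//   // ... publish new_table as the current table and delete the old wrapper ...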

template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space.  They aren't
    // allocated by os::malloc.
    if (!UseSharedSpaces ||
        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F);
    }
    _buckets = NULL;
  }
}


// Reverse the order of elements in the hash buckets.

template <MEMFLAGS F> void BasicHashtable<F>::reverse() {

  for (int i = 0; i < _table_size; ++i) {
    BasicHashtableEntry<F>* new_list = NULL;
    BasicHashtableEntry<F>* p = bucket(i);
    while (p != NULL) {
      BasicHashtableEntry<F>* next = p->next();
      p->set_next(new_list);
      new_list = p;
      p = next;
    }
    *bucket_addr(i) = new_list;
  }
}

template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}

template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           err_msg("Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
                   p2i(context->_removed_head), p2i(context->_removed_tail)));
    return;
  }

  // MT-safe add of the context's list of BasicHashtableEntry objects to the free list.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}
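
// The loop above is the usual lock-free "push a whole segment onto a shared list" pattern: link
// the segment's tail to the observed head, then CAS the segment's head in; on failure, retry with
// the freshly observed head.  An equivalent shape with std::atomic (illustrative sketch only;
// HotSpot uses Atomic::cmpxchg_ptr rather than the C++ standard library):
//
//   struct Node { Node* next; };
//   void push_segment(std::atomic<Node*>& head, Node* seg_head, Node* seg_tail) {
//     Node* cur = head.load();
//     do {
//       seg_tail->next = cur;                                 // splice segment in front of 'cur'
//     } while (!head.compare_exchange_weak(cur, seg_head));   // 'cur' is refreshed on failure
//   }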

// Copy the table to the shared space.

template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
                              *p != NULL;
                               p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}
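
// Layout produced by copy_table() in the shared misc data region (a sketch of the intent):
//
//   [ intptr_t len ][ entry ][ entry ] ... [ entry ]
//
// where 'len' is the size in bytes of the packed entries that follow.  Because each entry is
// copied through the address of the link that points at it, the bucket heads and _next fields
// end up referring to the archived copies rather than the original C-heap entries.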



// Reverse the order of elements in the hash buckets, keeping entries whose
// literals are below 'boundary' ahead of those at or above it.

template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {

  for (int i = 0; i < this->table_size(); ++i) {
    HashtableEntry<T, F>* high_list = NULL;
    HashtableEntry<T, F>* low_list = NULL;
    HashtableEntry<T, F>* last_low_entry = NULL;
    HashtableEntry<T, F>* p = bucket(i);
    while (p != NULL) {
      HashtableEntry<T, F>* next = p->next();
      if ((void*)p->literal() >= boundary) {
        p->set_next(high_list);
        high_list = p;
      } else {
        p->set_next(low_list);
        low_list = p;
        if (last_low_entry == NULL) {
          last_low_entry = p;
        }
      }
      p = next;
    }
    if (low_list != NULL) {
      *bucket_addr(i) = low_list;
      last_low_entry->set_next(high_list);
    } else {
      *bucket_addr(i) = high_list;
    }
  }
}

// For oops and Strings the size of the literal is interesting. For other types, nobody cares.
int literal_size(ConstantPool*) { return 0; }
int literal_size(Klass*)        { return 0; }
#if INCLUDE_ALL_GCS
int literal_size(nmethod*)      { return 0; }
#endif

int literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

int literal_size(oop obj) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  if (obj == NULL) {
    return 0;
  } else if (obj->klass() == SystemDictionary::String_klass()) {
    return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize;
  } else {
    return obj->size();
  }
}
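
// If a new literal type is ever stored in one of these tables (see the note below about new
// Hashtable subclasses), it needs its own overload; a hypothetical example for a type whose
// footprint is not interesting:
//
//   int literal_size(MyNewType*) { return 0; }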

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new literal_size(MyNewType lit) overload (see above).

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes  = literal_bytes + bucket_bytes + entry_bytes;

  double bucket_avg  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  double entry_avg   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes,  bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes,   entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}


// Dump the hash table buckets.

template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}

template <class T, MEMFLAGS F> TableStatistics Hashtable<T, F>::statistics_calculate(T (*literal_load_barrier)(HashtableEntry<T, F>*)) {
  NumberSeq summary;
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      T l = (literal_load_barrier != NULL) ? literal_load_barrier(e) : e->literal();
      literal_bytes += literal_size(l);
    }
    summary.add((double)count);
  }
  return TableStatistics(this->_stats_rate, summary, literal_bytes, sizeof(HashtableBucket<F>), sizeof(HashtableEntry<T, F>));
}

#ifndef PRODUCT

template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while (entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}


template <MEMFLAGS F> void BasicHashtable<F>::verify() {
  int count = 0;
  for (int i = 0; i < table_size(); i++) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      ++count;
    }
  }
  assert(count == number_of_entries(), "number of hashtable entries incorrect");
}


#endif // PRODUCT


#ifdef ASSERT

template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
  if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
    warning("Performance bug: SystemDictionary lookup_count=%d "
            "lookup_length=%d average=%lf load=%f",
            _lookup_count, _lookup_length,
            (double)_lookup_length / _lookup_count, load);
  }
}

#endif

// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;