1 /* 2 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/altHashing.hpp" 27 #include "classfile/javaClasses.hpp" 28 #include "classfile/stringTable.hpp" 29 #include "memory/allocation.inline.hpp" 30 #include "memory/filemap.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/safepoint.hpp" 34 #include "utilities/dtrace.hpp" 35 #include "utilities/hashtable.hpp" 36 #include "utilities/hashtable.inline.hpp" 37 #include "utilities/numberSeq.hpp" 38 39 40 // This hashtable is implemented as an open hash table with a fixed number of buckets. 
41 42 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() { 43 BasicHashtableEntry<F>* entry = NULL; 44 if (_free_list != NULL) { 45 entry = _free_list; 46 _free_list = _free_list->next(); 47 } 48 return entry; 49 } 50 51 // HashtableEntrys are allocated in blocks to reduce the space overhead. 52 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) { 53 BasicHashtableEntry<F>* entry = new_entry_free_list(); 54 55 if (entry == NULL) { 56 if (_first_free_entry + _entry_size >= _end_block) { 57 int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries)); 58 int len = _entry_size * block_size; 59 len = 1 << log2_intptr(len); // round down to power of 2 60 assert(len >= _entry_size, ""); 61 _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC); 62 _end_block = _first_free_entry + len; 63 } 64 entry = (BasicHashtableEntry<F>*)_first_free_entry; 65 _first_free_entry += _entry_size; 66 } 67 68 assert(_entry_size % HeapWordSize == 0, ""); 69 entry->set_hash(hashValue); 70 return entry; 71 } 72 73 74 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) { 75 HashtableEntry<T, F>* entry; 76 77 entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue); 78 entry->set_literal(obj); 79 return entry; 80 } 81 82 // Check to see if the hashtable is unbalanced. The caller set a flag to 83 // rehash at the next safepoint. If this bucket is 60 times greater than the 84 // expected average bucket length, it's an unbalanced hashtable. 85 // This is somewhat an arbitrary heuristic but if one bucket gets to 86 // rehash_count which is currently 100, there's probably something wrong. 
// Returns true when the bucket chain of length 'count' is rehash_multiple
// times longer than the average chain (number_of_entries / table_size).
// The caller is expected to set a flag so the table is rehashed at the next
// safepoint; this function itself only reports the imbalance.
template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}

// Shared alternate-hash seed for all rehashable tables; 0 means "not yet
// initialized" (move_to() asserts the computed seed is non-zero).
template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

// Create a new table and using alternate hash code, populate the new table
// with the existing elements. This can be used to change the hash code
// and could in the future change the size of the table.
//
// NOTE(review): the outer loop runs over new_table->table_size() but reads
// this->bucket(i) — this presumes the two tables have the same bucket count;
// confirm before using this to actually resize.
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  // Remember the count so we can verify no entry was dropped by the move.
  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      // Save the successor before relinking p into the new table.
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted. The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        // add_entry overwrote the _next field; restore the shared LSB.
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable. The memory
  // for the elements has been used in a new table and is not
  // destroyed. The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}

// Release the bucket array (but never the entries, which live in separately
// allocated blocks / the shared archive).
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space. They aren't
    // allocated by os::malloc
    if (!UseSharedSpaces ||
        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
      FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
    }
    _buckets = NULL;
  }
}


// Reverse the order of elements in the hash buckets.

template <MEMFLAGS F> void BasicHashtable<F>::reverse() {

  for (int i = 0; i < _table_size; ++i) {
    // Classic in-place singly-linked-list reversal, one bucket at a time.
    BasicHashtableEntry<F>* new_list = NULL;
    BasicHashtableEntry<F>* p = bucket(i);
    while (p != NULL) {
      BasicHashtableEntry<F>* next = p->next();
      p->set_next(new_list);
      new_list = p;
      p = next;
    }
    *bucket_addr(i) = new_list;
  }
}


// Copy the table to the shared space.
// Layout written at *top: one intptr_t byte-length slot, then every entry
// copied verbatim in bucket order; the length slot is back-patched once the
// total is known. In-table next pointers are redirected to the copies, and
// afterwards every entry is marked shared.

template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  // Reserve the length slot; it is filled in after all entries are copied.
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    // Walk via entry addresses so each link can be repointed at the copy.
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  // Back-patch the number of entry bytes written (excluding the slot itself).
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}



// Reverse the order of elements in the hash buckets.
// Entries whose literal is at or above 'boundary' go to a "high" sublist,
// the rest to a "low" sublist; each bucket ends up as low entries followed
// by high entries (each sublist in reversed original order).

template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {

  for (int i = 0; i < this->table_size(); ++i) {
    HashtableEntry<T, F>* high_list = NULL;
    HashtableEntry<T, F>* low_list = NULL;
    HashtableEntry<T, F>* last_low_entry = NULL;  // tail of low_list, for splicing
    HashtableEntry<T, F>* p = bucket(i);
    while (p != NULL) {
      HashtableEntry<T, F>* next = p->next();
      if ((void*)p->literal() >= boundary) {
        p->set_next(high_list);
        high_list = p;
      } else {
        p->set_next(low_list);
        low_list = p;
        if (last_low_entry == NULL) {
          last_low_entry = p;
        }
      }
      p = next;
    }
    if (low_list != NULL) {
      // Bucket = low sublist, then high sublist appended at the low tail.
      *bucket_addr(i) = low_list;
      last_low_entry->set_next(high_list);
    } else {
      *bucket_addr(i) = high_list;
    }
  }
}

// Footprint of a Symbol literal in bytes (Symbol::size() is in HeapWords).
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

// Footprint of a String oop literal in bytes: the String object plus its
// backing value array.
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
  return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;  // accumulates per-bucket chain lengths for avg/var/max
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  // Guard the averages against empty tables (division by zero).
  double bucket_avg = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  double entry_avg = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes, bucket_avg);
  st->print_cr("Number of entries : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes, entry_avg);
  st->print_cr("Number of literals : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
}


// Dump the hash table buckets.
// Layout written at *top: bucket-array byte length (intptr_t), entry count
// (intptr_t), then the raw bucket array; _buckets is repointed at the copy.

template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}


#ifndef PRODUCT

// Debug dump: prints every entry as "<bucket index> : <literal>".
template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while(entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}


// Debug check: the sum of all bucket chain lengths must equal the cached
// entry count.
template <MEMFLAGS F> void BasicHashtable<F>::verify() {
  int count = 0;
  for (int i = 0; i < table_size(); i++) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      ++count;
    }
  }
  assert(count == number_of_entries(), "number of hashtable entries incorrect");
}

#endif // PRODUCT

#ifdef ASSERT

// Debug-only probe-length check: warns when the average number of entries
// examined per lookup (_lookup_length / _lookup_count) exceeds twice the
// expected load factor.
template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
  if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
    warning("Performance bug: SystemDictionary lookup_count=%d "
            "lookup_length=%d average=%lf load=%f",
            _lookup_count, _lookup_length,
            (double) _lookup_length / _lookup_count, load);
  }
}

#endif


// Explicitly instantiate these types
// (all template definitions live in this .cpp, so every <T, F> combination
// used elsewhere in the VM must be instantiated here or it fails to link)
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtCompiler>;