1 /* 2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/altHashing.hpp" 27 #include "classfile/dictionary.hpp" 28 #include "classfile/javaClasses.inline.hpp" 29 #include "classfile/moduleEntry.hpp" 30 #include "classfile/packageEntry.hpp" 31 #include "classfile/placeholders.hpp" 32 #include "classfile/protectionDomainCache.hpp" 33 #include "classfile/stringTable.hpp" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "runtime/safepoint.hpp" 39 #include "utilities/dtrace.hpp" 40 #include "utilities/hashtable.hpp" 41 #include "utilities/hashtable.inline.hpp" 42 #include "utilities/numberSeq.hpp" 43 44 45 // This hashtable is implemented as an open hash table with a fixed number of buckets. 

// Pop an entry off this table's private free list of previously-freed
// entries.  Returns NULL when the free list is empty (caller then carves a
// fresh entry out of the current allocation block instead).
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
  BasicHashtableEntry<F>* entry = NULL;
  if (_free_list != NULL) {
    entry = _free_list;
    _free_list = _free_list->next();
  }
  return entry;
}

// HashtableEntrys are allocated in blocks to reduce the space overhead.
// First tries the free list; otherwise bump-allocates from the current
// block, malloc'ing a new block (tagged with MEMFLAGS F for NMT) when the
// current one is exhausted.  Note: retired blocks are never returned to the
// C heap — entries are recycled via the free list instead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      // Size the new block relative to current table occupancy, capped at
      // 512 entries' worth.
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  // Entries must be HeapWord-aligned so the bump-pointer carving above
  // yields properly aligned objects.
  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}


// Allocate an entry (via the BasicHashtable block/free-list allocator above)
// and install the literal value it maps.
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry;

  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  entry->set_literal(obj);
  return entry;
}

// Check to see if the hashtable is unbalanced.  The caller set a flag to
// rehash at the next safepoint.  If this bucket is 60 times greater than the
// expected average bucket length, it's an unbalanced hashtable.
// This is somewhat an arbitrary heuristic but if one bucket gets to
// rehash_count which is currently 100, there's probably something wrong.

// Returns true when this bucket's chain length exceeds rehash_multiple times
// the average chain length, i.e. the table is pathologically unbalanced
// (possibly a hash-flooding attack).  The caller is expected to set a flag
// and rehash with a new seed at the next safepoint.
template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}

// Create a new table and using alternate hash code, populate the new table
// with the existing elements.  This can be used to change the hash code
// and could in the future change the size of the table.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  // NOTE(review): the outer loop bounds use new_table->table_size(); this is
  // only correct while old and new tables have the same size — confirm if
  // resizing support is ever added.
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.   The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable.  The memory
  // for the elements has been used in a new table and is not
  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}

// Free the bucket array (but never the entries, which live in the block
// allocator and free list).
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space.  They aren't
    // allocated by os::malloc
    if (!UseSharedSpaces ||
        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
      FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
    }
    _buckets = NULL;
  }
}

// Push an unlinked entry onto this context's private removal list.  The list
// is head-linked; _removed_tail remembers the first entry pushed so the whole
// chain can later be spliced onto the table's free list in one CAS.
template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}

// Splice the context's accumulated removal list onto the shared free list
// and decrement the entry count.  Safe against concurrent callers.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
           p2i(context->_removed_head), p2i(context->_removed_tail));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  // Classic CAS retry loop: point the chain's tail at the current free-list
  // head, then try to swing _free_list to the chain's head; on failure,
  // re-read the head and retry.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}

// Compute the number of bytes needed to dump all live entries (plus a length
// word) into the CDS archive; must agree exactly with copy_table() below.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len

  for (int i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      bytes += entry_size();
    }
  }

  return bytes;
}

// Dump the hash table entries (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
  assert(is_ptr_aligned(top, sizeof(intptr_t)), "bad alignment");
  intptr_t *plen = (intptr_t*)(top);
  top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      // memcpy the entry into the archive region and redirect the in-place
      // link (*p) to the archived copy, so the chains stay intact in the
      // dumped image.
      *p = (BasicHashtableEntry<F>*)memcpy(top, *p, entry_size());
      top += entry_size();
    }
  }
  *plen = (char*)(top) - (char*)plen - sizeof(*plen);
  assert(top == end, "count_bytes_for_table is wrong");
  // Set the shared bit.  Shared entries must never be freed at runtime.
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}

// Footprint of a Symbol literal, in bytes.
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

// Footprint of a String literal (the String oop plus its value array), in bytes.
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
  return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  int literal_bytes = 0;
  // One sample per bucket: the chain length.  literal_bytes accumulates the
  // deep size of every literal via the overloads above.
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  // Guard against division by zero on an empty table.
  double bucket_avg  = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  double entry_avg   = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes, bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes, entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}


// Dump the hash table buckets.

// Bytes needed to dump the bucket array (length word + entry count word +
// the buckets themselves); must agree exactly with copy_buckets() below.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len
  bytes += sizeof(intptr_t); // _number_of_entries
  bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets

  return bytes;
}

// Dump the buckets (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
  assert(is_ptr_aligned(top, sizeof(intptr_t)), "bad alignment");
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(top) = len;
  top += sizeof(intptr_t);

  *(intptr_t*)(top) = _number_of_entries;
  top += sizeof(intptr_t);

  // Copy the bucket array into the archive and redirect _buckets to the
  // archived copy.
  _buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
  top += len;

  assert(top == end, "count_bytes_for_buckets is wrong");
}


#ifndef PRODUCT

// Debug dump: print every (bucket index, literal) pair to tty.
template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while(entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}

// Verify every entry (via T::verify()), check that the bucket chains account
// for exactly number_of_entries() elements, and log bucket statistics.
// T is the concrete entry type; see the explicit instantiations at the
// bottom of this file.
template <MEMFLAGS F>
template <class T> void BasicHashtable<F>::verify_table(const char* table_name) {
  int element_count = 0;
  int max_bucket_count = 0;
  int max_bucket_number = 0;
  for (int index = 0; index < table_size(); index++) {
    int bucket_count = 0;
    for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
      probe->verify();
      bucket_count++;
    }
    element_count += bucket_count;
    if (bucket_count > max_bucket_count) {
      max_bucket_count = bucket_count;
      max_bucket_number = index;
    }
  }
  guarantee(number_of_entries() == element_count,
            "Verify of %s failed", table_name);

  // Log some statistics about the hashtable
  log_info(hashtables)("%s max bucket size %d bucket %d element count %d table size %d", table_name,
                       max_bucket_count, max_bucket_number, _number_of_entries, _table_size);
  if (_number_of_entries > 0 && log_is_enabled(Debug, hashtables)) {
    for (int index = 0; index < table_size(); index++) {
      int bucket_count = 0;
      for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
        log_debug(hashtables)("bucket %d hash " INTPTR_FORMAT, index, (intptr_t)probe->hash());
        bucket_count++;
      }
      if (bucket_count > 0) {
        log_debug(hashtables)("bucket %d count %d", index, bucket_count);
      }
    }
  }
}
#endif // PRODUCT

// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class HashtableBucket<mtClass>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;
template class BasicHashtable<mtTracing>;
#endif
template class BasicHashtable<mtCompiler>;

// Explicit instantiations of verify_table for each entry type that uses it.
template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<PackageEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<ProtectionDomainCacheEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<PlaceholderEntry>(char const*);