< prev index next >

src/share/vm/utilities/hashtable.cpp

Print this page




  83   entry->set_literal(obj);
  84   return entry;
  85 }
  86 
  87 // Check to see if the hashtable is unbalanced.  The caller sets a flag to
  88 // rehash at the next safepoint.  If this bucket is 60 times greater than the
  89 // expected average bucket length, it's an unbalanced hashtable.
  90 // This is a somewhat arbitrary heuristic, but if one bucket gets to
  91 // rehash_count, which is currently 100, there's probably something wrong.
  92 
  93 template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  94   assert(this->table_size() != 0, "underflow");
       // Expected average chain length for a well-balanced table.
       double avg_chain = (double)this->number_of_entries() / (double)this->table_size();
       if (count > avg_chain * rehash_multiple) {
         // Unbalanced: the caller sets a flag so the table is rehashed at the
         // next safepoint, which should occur at some guaranteed interval.
         return true;
       }
       return false;
     }
 102 
 103 template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
 104 
 105 // Create a new table and, using an alternate hash code, populate the new table
 106 // with the existing elements.  This can be used to change the hash code
 107 // and could in the future change the size of the table.
 108 
 109 template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
 110 
 111   // Initialize the global seed for hashing.
 112   _seed = AltHashing::compute_seed();
 113   assert(seed() != 0, "shouldn't be zero");
 114 
 115   int saved_entry_count = this->number_of_entries();
 116 
 117   // Iterate through the table and create a new entry for the new table
 118   for (int i = 0; i < new_table->table_size(); ++i) {
 119     for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
 120       HashtableEntry<T, F>* next = p->next();
 121       T string = p->literal();
 122       // Use alternate hashing algorithm on the symbol in the first table
 123       unsigned int hashValue = string->new_hash(seed());
 124       // Get a new index relative to the new table (can also change size)


 190   Atomic::add(-context->_num_removed, &_number_of_entries);
 191 }
 192 
 193 // Copy the table to the shared space.
 194 
 195 template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {
 196 
 197   // Dump the hash table entries.
 198 
       // Reserve a length word; it is filled in after all entries are copied.
 199   intptr_t *plen = (intptr_t*)(*top);
 200   *top += sizeof(*plen);
 201 
 202   int i;
 203   for (i = 0; i < _table_size; ++i) {
 204     for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
 205                               *p != NULL;
 206                                p = (*p)->next_addr()) {
 207       if (*top + entry_size() > end) {
 208         report_out_of_shared_space(SharedMiscData);
 209       }
       // Copy the entry into the shared space and relink the referencing
       // pointer (bucket head or predecessor's next) to the new copy.  The
       // (void*) cast on the source matches the updated revision of this
       // file and sidesteps qualifier conversions on the entry pointer.
 210       *p = (BasicHashtableEntry<F>*)memcpy(*top, (void*)*p, entry_size());
 211       *top += entry_size();
 212     }
 213   }
       // Total bytes of entry data copied, excluding the length word itself.
 214   *plen = (char*)(*top) - (char*)plen - sizeof(*plen);
 215 
 216   // Set the shared bit.
 217 
 218   for (i = 0; i < _table_size; ++i) {
 219     for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
 220       p->set_shared();
 221     }
 222   }
 223 }
 224 
 225 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
 226   return symbol->size() * HeapWordSize;
 227 }
 228 
 229 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
 230   // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,


 270   st->print_cr("Average bucket size     : %9.3f", summary.avg());
 271   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
 272   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
 273   st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
 274 }
 275 
 276 
 277 // Dump the hash table buckets.
 278 
 279 template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
 280   intptr_t len = _table_size * sizeof(HashtableBucket<F>);
       // Write the byte length of the bucket array...
 281   *(intptr_t*)(*top) = len;
 282   *top += sizeof(intptr_t);
 283 
       // ...followed by the entry count.
 284   *(intptr_t*)(*top) = _number_of_entries;
 285   *top += sizeof(intptr_t);
 286 
 287   if (*top + len > end) {
 288     report_out_of_shared_space(SharedMiscData);
 289   }
       // Copy the bucket array into the shared space and repoint _buckets at
       // the shared copy.  The (void*) cast on the source matches the updated
       // revision of this file and sidesteps qualifier conversions.
 290   _buckets = (HashtableBucket<F>*)memcpy(*top, (void*)_buckets, len);
 291   *top += len;
 292 }
 293 
 294 
 295 #ifndef PRODUCT
 296 
 297 template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
 298   ResourceMark rm;
 299 
 300   for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
 301     HashtableEntry<T, F>* entry = bucket(i);
 302     while(entry != NULL) {
 303       tty->print("%d : ", i);
 304       entry->literal()->print();
 305       tty->cr();
 306       entry = entry->next();
 307     }
 308   }
 309 }
 310 




  83   entry->set_literal(obj);
  84   return entry;
  85 }
  86 
  87 // Check to see if the hashtable is unbalanced.  The caller sets a flag to
  88 // rehash at the next safepoint.  If this bucket is 60 times greater than the
  89 // expected average bucket length, it's an unbalanced hashtable.
  90 // This is a somewhat arbitrary heuristic, but if one bucket gets to
  91 // rehash_count, which is currently 100, there's probably something wrong.
  92 
  93 template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  94   assert(this->table_size() != 0, "underflow");
       // A chain more than rehash_multiple times the expected average bucket
       // length marks the table as unbalanced; the caller then sets a flag to
       // trigger a rehash at the next safepoint.
       const double expected_avg = (double)this->number_of_entries() / (double)this->table_size();
       return count > expected_avg * rehash_multiple;
     }
 102 


 103 // Create a new table and, using an alternate hash code, populate the new table
 104 // with the existing elements.  This can be used to change the hash code
 105 // and could in the future change the size of the table.
 106 
 107 template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
 108 
 109   // Initialize the global seed for hashing.
 110   _seed = AltHashing::compute_seed();
 111   assert(seed() != 0, "shouldn't be zero");
 112 
 113   int saved_entry_count = this->number_of_entries();
 114 
 115   // Iterate through the table and create a new entry for the new table
 116   for (int i = 0; i < new_table->table_size(); ++i) {
 117     for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
 118       HashtableEntry<T, F>* next = p->next();
 119       T string = p->literal();
 120       // Use alternate hashing algorithm on the symbol in the first table
 121       unsigned int hashValue = string->new_hash(seed());
 122       // Get a new index relative to the new table (can also change size)


 188   Atomic::add(-context->_num_removed, &_number_of_entries);
 189 }
 190 
 191 // Copy the table to the shared space.
 192 
 193 template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {
 194 
 195   // Dump the hash table entries.
 196 
       // Reserve a length word; it is filled in after all entries are copied.
 197   intptr_t *plen = (intptr_t*)(*top);
 198   *top += sizeof(*plen);
 199 
 200   int i;
 201   for (i = 0; i < _table_size; ++i) {
 202     for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
 203                               *p != NULL;
 204                                p = (*p)->next_addr()) {
 205       if (*top + entry_size() > end) {
 206         report_out_of_shared_space(SharedMiscData);
 207       }
       // Copy the entry into the shared space at *top and relink the pointer
       // that referenced it (bucket head or predecessor's next field) to the
       // new copy; iteration then continues through the relocated entry.
 208       *p = (BasicHashtableEntry<F>*)memcpy(*top, (void*)*p, entry_size());
 209       *top += entry_size();
 210     }
 211   }
       // Total bytes of entry data copied, excluding the length word itself.
 212   *plen = (char*)(*top) - (char*)plen - sizeof(*plen);
 213 
 214   // Set the shared bit.
 215 
 216   for (i = 0; i < _table_size; ++i) {
 217     for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
 218       p->set_shared();
 219     }
 220   }
 221 }
 222 
 223 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
 224   return symbol->size() * HeapWordSize;
 225 }
 226 
 227 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
 228   // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,


 268   st->print_cr("Average bucket size     : %9.3f", summary.avg());
 269   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
 270   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
 271   st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
 272 }
 273 
 274 
 275 // Dump the hash table buckets.
 276 
 277 template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
 278   intptr_t len = _table_size * sizeof(HashtableBucket<F>);
       // Write the byte length of the bucket array...
 279   *(intptr_t*)(*top) = len;
 280   *top += sizeof(intptr_t);
 281 
       // ...followed by the entry count.
 282   *(intptr_t*)(*top) = _number_of_entries;
 283   *top += sizeof(intptr_t);
 284 
       // NOTE(review): the two header words above are written before this
       // bounds check — presumably header space is always reserved; verify.
 285   if (*top + len > end) {
 286     report_out_of_shared_space(SharedMiscData);
 287   }
       // Copy the bucket array into the shared space and repoint _buckets at
       // the shared copy.
 288   _buckets = (HashtableBucket<F>*)memcpy(*top, (void*)_buckets, len);
 289   *top += len;
 290 }
 291 
 292 
 293 #ifndef PRODUCT
 294 
 295 template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
 296   ResourceMark rm;
 297 
 298   for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
 299     HashtableEntry<T, F>* entry = bucket(i);
 300     while(entry != NULL) {
 301       tty->print("%d : ", i);
 302       entry->literal()->print();
 303       tty->cr();
 304       entry = entry->next();
 305     }
 306   }
 307 }
 308 


< prev index next >