
src/share/vm/services/mallocSiteTable.hpp

rev 7899 : imported patch nmtfix

Old version:

 221         return true;
 222       }
 223     }
 224     return false;
 225   }
 226 
 227   // Walk this table.
 228   static bool walk_malloc_site(MallocSiteWalker* walker);
 229 
 230  private:
 231   static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
 232   static void reset();
 233 
 234   // Delete a bucket's linked list
 235   static void delete_linked_list(MallocSiteHashtableEntry* head);
 236 
 237   static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
 238   static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
 239   static bool walk(MallocSiteWalker* walker);
 240 
 241   static inline int hash_to_index(int  hash) {
 242     hash = (hash > 0) ? hash : (-hash);
 243     return (hash % table_size);
 244   }
 245 
 246   static inline const NativeCallStack* hash_entry_allocation_stack() {
 247     return (NativeCallStack*)_hash_entry_allocation_stack;
 248   }
 249 
 250  private:
 251   // Counter for tracking concurrent access
 252   static volatile int                _access_count;
 253 
 254   // The callsite hashtable. It has to be a static table,
 255   // since malloc calls can come from the C runtime linker.
 256   static MallocSiteHashtableEntry*   _table[table_size];
 257 
 258 
 259   // Reserve enough memory for placing the objects
 260 
 261   // The memory for the hashtable entry allocation stack object
 262   static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];

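Note on the hash_to_index change between the two versions: the old code flips a negative hash positive with (hash > 0) ? hash : (-hash), but for hash == INT_MIN that negation overflows a signed int (undefined behavior; on two's-complement hardware the value typically stays INT_MIN), so hash % table_size can produce a negative bucket index. Taking the hash as unsigned int, as the new version below does, makes the modulo well defined and keeps the result in [0, table_size). A minimal standalone sketch of both behaviors (not HotSpot code; the table_size value and the old_/new_ function names are illustrative):

#include <climits>
#include <cstdio>

static const int table_size = 511;  // illustrative; the real class constant differs

// Pre-patch logic: the sign flip overflows for INT_MIN (undefined behavior
// for signed int; on two's-complement machines the value typically stays
// negative), so the modulo can yield a negative index.
static int old_hash_to_index(int hash) {
  hash = (hash > 0) ? hash : (-hash);
  return (hash % table_size);
}

// Post-patch logic: unsigned arithmetic is well defined, and the modulo
// result is always in [0, table_size).
static unsigned int new_hash_to_index(unsigned int hash) {
  return (hash % table_size);
}

int main() {
  printf("old: %d\n", old_hash_to_index(INT_MIN));               // typically -16
  printf("new: %u\n", new_hash_to_index((unsigned int)INT_MIN)); // 16
  return 0;
}
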
New version:

 221         return true;
 222       }
 223     }
 224     return false;
 225   }
 226 
 227   // Walk this table.
 228   static bool walk_malloc_site(MallocSiteWalker* walker);
 229 
 230  private:
 231   static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
 232   static void reset();
 233 
 234   // Delete a bucket's linked list
 235   static void delete_linked_list(MallocSiteHashtableEntry* head);
 236 
 237   static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
 238   static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
 239   static bool walk(MallocSiteWalker* walker);
 240 
 241   static inline unsigned int hash_to_index(unsigned int hash) {

 242     return (hash % table_size);
 243   }
 244 
 245   static inline const NativeCallStack* hash_entry_allocation_stack() {
 246     return (NativeCallStack*)_hash_entry_allocation_stack;
 247   }
 248 
 249  private:
 250   // Counter for tracking concurrent access
 251   static volatile int                _access_count;
 252 
 253   // The callsite hashtable. It has to be a static table,
 254   // since malloc calls can come from the C runtime linker.
 255   static MallocSiteHashtableEntry*   _table[table_size];
 256 
 257 
 258   // Reserve enough memory for placing the objects
 259 
 260   // The memory for the hashtable entry allocation stack object
 261   static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
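
On the two constraints documented above: the table itself must live in static storage because malloc calls can reach NMT before any allocator is usable, and _hash_entry_allocation_stack reserves raw, size_t-aligned storage into which a NativeCallStack object can later be constructed in place; hash_entry_allocation_stack() then simply casts the buffer's address to the object type. A minimal sketch of this reserve-and-construct-in-place pattern, assuming CALC_OBJ_SIZE_IN_TYPE rounds sizeof up to whole size_t slots (the struct and init_stack() helper are hypothetical stand-ins, not HotSpot code):

#include <cstddef>
#include <new>

// Hypothetical stand-in for NativeCallStack (a few frame pointers).
struct NativeCallStackLike {
  void* _frames[4];
  NativeCallStackLike() {
    for (int i = 0; i < 4; i++) _frames[i] = NULL;
  }
};

// Assumed semantics of CALC_OBJ_SIZE_IN_TYPE: the number of 'type'-sized
// slots needed to cover sizeof(obj), rounded up.
#define CALC_OBJ_SIZE_IN_TYPE(obj, type) \
  ((sizeof(obj) + sizeof(type) - 1) / sizeof(type))

// Static raw storage: usable before any heap allocator is initialized,
// and aligned for size_t (adequate for a pointer-holding object here).
static size_t _stack_storage[CALC_OBJ_SIZE_IN_TYPE(NativeCallStackLike, size_t)];

// Construct the object inside the reserved buffer; no heap allocation.
static const NativeCallStackLike* init_stack() {
  return new (_stack_storage) NativeCallStackLike();
}

Declaring the member as a size_t array rather than as a NativeCallStack is what allows the storage to exist before any constructor has run, which matches the accessor's cast in the header above.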