
src/share/vm/memory/heap.cpp





 154     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 155     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 156     // expand _segmap space
 157     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 158     if ((ds > 0) && !_segmap.expand_by(ds)) {
 159       return false;
 160     }
 161     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 162     // initialize additional segmap entries
 163     mark_segmap_as_free(i, _number_of_committed_segments);
 164   }
 165   return true;
 166 }
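
The `ds` computed at line 157 is the page-aligned growth of the segment map, which keeps one byte per committed segment. A minimal sketch of that round-up arithmetic, assuming a power-of-two page size and using a hypothetical round_up_to_page helper in place of the VM's align_to_page_size:

    #include <cassert>
    #include <cstddef>

    // Round n up to the next multiple of page_size; page_size must be a
    // power of two. Hypothetical stand-in for align_to_page_size().
    static size_t round_up_to_page(size_t n, size_t page_size) {
      assert((page_size & (page_size - 1)) == 0);
      return (n + page_size - 1) & ~(page_size - 1);
    }

    int main() {
      // 5000 segment-map bytes need two 4K pages; if 4096 bytes are already
      // committed, the delta to commit (the 'ds' above) is one more page.
      size_t needed = round_up_to_page(5000, 4096);  // 8192
      size_t ds     = needed - 4096;                 // 4096
      assert(ds == 4096);
      return 0;
    }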
 167 
 168 void CodeHeap::clear() {
 169   _next_segment = 0;
 170   mark_segmap_as_free(0, _number_of_committed_segments);
 171 }
 172 
 173 
 174 void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
 175   size_t number_of_segments = size_to_segments(instance_size + header_size());
 176   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 177 
 178   // First check if we can satisfy request from freelist
 179   NOT_PRODUCT(verify());
 180   HeapBlock* block = search_freelist(number_of_segments, is_critical);
 181   NOT_PRODUCT(verify());
 182 
 183   if (block != NULL) {
 184     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
 185     assert(!block->free(), "must not be marked free");
 186     DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
 187     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 188     return block->allocated_space();
 189   }
 190 
 191   // Ensure minimum size for allocation to the heap.
 192   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 193 
 194   if (!is_critical) {
 195     // Make sure the allocation fits in the unallocated heap without using
 196     // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
 197     if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
 198       // Fail allocation
 199       return NULL;
 200     }
 201   }
 202 
 203   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
 204     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
 205     HeapBlock* b =  block_at(_next_segment);
 206     b->initialize(number_of_segments);
 207     _next_segment += number_of_segments;
 208     DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
 209     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 210     return b->allocated_space();
 211   } else {
 212     return NULL;
 213   }
 214 }
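
For orientation, CodeHeap accounts for space in fixed-size segments, and a non-critical request must leave CodeCacheMinimumFreeSpace untouched for critical allocations. A self-contained sketch of that arithmetic, assuming a 64-byte segment size and hypothetical constants (the real values are VM flags):

    #include <cassert>
    #include <cstddef>

    static const size_t kSegmentSize = 64;   // assumed; the VM configures this
    static const size_t kMinFree     = 4096; // stand-in for CodeCacheMinimumFreeSpace

    // size_to_segments: round a byte size up to whole segments.
    static size_t size_to_segments(size_t bytes) {
      return (bytes + kSegmentSize - 1) / kSegmentSize;
    }

    // segments_to_size: convert a segment count back to bytes.
    static size_t segments_to_size(size_t segments) {
      return segments * kSegmentSize;
    }

    // Non-critical requests must not dip into the reserved tail.
    static bool fits_non_critical(size_t request_bytes, size_t unallocated_bytes) {
      return request_bytes <= unallocated_bytes - kMinFree;
    }

    int main() {
      size_t segs = size_to_segments(100 + 16);  // 116 bytes -> 2 segments
      assert(segs == 2);
      assert(segments_to_size(segs) == 128);
      assert(fits_non_critical(128, 8192));      // leaves >= kMinFree behind
      assert(!fits_non_critical(8000, 8192));    // would eat into the reserve
      return 0;
    }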
 215 
 216 
 217 void CodeHeap::deallocate(void* p) {
 218   assert(p == find_start(p), "illegal deallocation");
 219   // Find start of HeapBlock
 220   HeapBlock* b = (((HeapBlock *)p) - 1);
 221   assert(b->allocated_space() == p, "sanity check");
 222   DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,


 410     return;
 411   }
 412 
 413   // Scan for the right place to insert into the list;
 414   // the list is sorted by increasing addresses.
 415   FreeBlock* prev = _freelist;
 416   FreeBlock* cur  = _freelist->link();
 417   while(cur != NULL && cur < b) {
 418     assert(prev < cur, "Freelist must be ordered");
 419     prev = cur;
 420     cur  = cur->link();
 421   }
 422   assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
 423   insert_after(prev, b);
 424 }
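
The scan above is a textbook sorted insertion into a singly linked list keyed by node address. The same pattern in isolation, with a plain Node type instead of the VM's FreeBlock (a sketch, not the VM's insert_after):

    #include <cassert>
    #include <cstddef>

    struct Node { Node* next; };

    // Insert b into a list sorted by increasing node address, given that
    // 'head' is non-null and head < b (as in the VM code, where earlier
    // branches handle the empty-list and insert-at-front cases).
    static void insert_sorted(Node* head, Node* b) {
      Node* prev = head;
      Node* cur  = head->next;
      while (cur != NULL && cur < b) {   // advance while cur precedes b
        prev = cur;
        cur  = cur->next;
      }
      assert(prev < b && (cur == NULL || b < cur));
      b->next    = cur;                  // splice b between prev and cur
      prev->next = b;
    }

    int main() {
      Node n[3] = { { NULL }, { NULL }, { NULL } };
      // Array elements sit at increasing addresses: &n[0] < &n[1] < &n[2].
      n[0].next = &n[2];                 // list: n[0] -> n[2]
      insert_sorted(&n[0], &n[1]);       // insert the middle node
      assert(n[0].next == &n[1] && n[1].next == &n[2]);
      return 0;
    }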
 425 
 426 /**
 427  * Search the freelist for the first block that is large enough (first fit).
 428  * @return NULL if none was found
 429  */
 430 FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
 431   FreeBlock* found_block = NULL;
 432   FreeBlock* found_prev  = NULL;
 433   size_t     found_length = 0;
 434 
 435   FreeBlock* prev = NULL;
 436   FreeBlock* cur = _freelist;
 437   const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
 438 
 439   // Search for first block that fits
 440   while(cur != NULL) {
 441     if (cur->length() >= length) {
 442       // Non-critical allocations are not allowed to use the last part of the code heap.
 443       // Make sure the end of the allocation doesn't cross into that reserved tail.
 444       if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
 445         // The freelist is sorted by address - if one block fails this check, all later blocks fail it too.
 446         break;
 447       }
 448       // Remember block, its previous element, and its length
 449       found_block = cur;
 450       found_prev  = prev;
 451       found_length = found_block->length();
 452 
 453       break;
 454     }
 455     // Next element in list
 456     prev = cur;
 457     cur  = cur->link();
 458   }
 459 
 460   if (found_block == NULL) {
 461     // None found
 462     return NULL;
 463   }
 464 
 465   // Exact (or at least good enough) fit. Remove from list.
 466   // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
 467   if (found_length - length < CodeCacheMinBlockLength) {


--- second frame of the side-by-side diff: the same region in the other version of the file ---

 154     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 155     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 156     // expand _segmap space
 157     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 158     if ((ds > 0) && !_segmap.expand_by(ds)) {
 159       return false;
 160     }
 161     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 162     // initialize additional segmap entries
 163     mark_segmap_as_free(i, _number_of_committed_segments);
 164   }
 165   return true;
 166 }
 167 
 168 void CodeHeap::clear() {
 169   _next_segment = 0;
 170   mark_segmap_as_free(0, _number_of_committed_segments);
 171 }
 172 
 173 
 174 void* CodeHeap::allocate(size_t instance_size) {
 175   size_t number_of_segments = size_to_segments(instance_size + header_size());
 176   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 177 
 178   // First check if we can satisfy request from freelist
 179   NOT_PRODUCT(verify());
 180   HeapBlock* block = search_freelist(number_of_segments);
 181   NOT_PRODUCT(verify());
 182 
 183   if (block != NULL) {
 184     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
 185     assert(!block->free(), "must not be marked free");
 186     DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
 187     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 188     return block->allocated_space();
 189   }
 190 
 191   // Ensure minimum size for allocation to the heap.
 192   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 193 
 194   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
 195     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
 196     HeapBlock* b =  block_at(_next_segment);
 197     b->initialize(number_of_segments);
 198     _next_segment += number_of_segments;
 199     DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
 200     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 201     return b->allocated_space();
 202   } else {
 203     return NULL;
 204   }
 205 }
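
Both versions of allocate share the same two-tier strategy: satisfy the request from the freelist if possible, otherwise bump _next_segment into fresh committed space. A schematic of that control flow, with hypothetical stand-ins for the VM's types:

    #include <cassert>
    #include <cstddef>

    static const size_t kNone = (size_t)-1;

    // Schematic stand-in for CodeHeap's bump-pointer state.
    struct Heap {
      size_t next_segment;        // first never-used segment
      size_t committed_segments;  // segments backed by committed memory
    };

    // Hypothetical freelist probe; an empty freelist here, so it never hits.
    static size_t freelist_take(size_t /*segments*/) { return kNone; }

    static size_t allocate_segments(Heap& h, size_t segments) {
      size_t slot = freelist_take(segments);        // 1) reuse a freed block
      if (slot != kNone) return slot;
      if (h.next_segment + segments <= h.committed_segments) {
        slot = h.next_segment;                      // 2) bump into fresh space
        h.next_segment += segments;
        return slot;
      }
      return kNone;                                 // 3) caller must expand or fail
    }

    int main() {
      Heap h = { 0, 16 };
      assert(allocate_segments(h, 4)  == 0);     // bump allocation at segment 0
      assert(allocate_segments(h, 4)  == 4);     // next block right behind it
      assert(allocate_segments(h, 16) == kNone); // would overrun committed space
      return 0;
    }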
 206 
 207 
 208 void CodeHeap::deallocate(void* p) {
 209   assert(p == find_start(p), "illegal deallocation");
 210   // Find start of HeapBlock
 211   HeapBlock* b = (((HeapBlock *)p) - 1);
 212   assert(b->allocated_space() == p, "sanity check");
 213   DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,


 401     return;
 402   }
 403 
 404   // Scan for the right place to insert into the list;
 405   // the list is sorted by increasing addresses.
 406   FreeBlock* prev = _freelist;
 407   FreeBlock* cur  = _freelist->link();
 408   while(cur != NULL && cur < b) {
 409     assert(prev < cur, "Freelist must be ordered");
 410     prev = cur;
 411     cur  = cur->link();
 412   }
 413   assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
 414   insert_after(prev, b);
 415 }
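
Keeping the freelist address-ordered is what makes coalescing of free blocks cheap: a block's successor in the list is also its nearest free neighbor in memory. A toy illustration of that adjacency test and merge, under an assumed 64-byte segment size (not the VM's FreeBlock layout):

    #include <cassert>
    #include <cstddef>

    static const size_t kSegmentSize = 64;  // assumed segment size

    struct Block {
      size_t length_segments;  // block size in segments
      Block* next;             // next free block, by increasing address
    };

    // True if 'b' ends exactly where its list successor begins, in which
    // case the two free blocks can be merged into one.
    static bool adjacent(const Block* b) {
      const char* end = (const char*)b + b->length_segments * kSegmentSize;
      return b->next != NULL && (const char*)b->next == end;
    }

    static void merge_with_next(Block* b) {
      if (adjacent(b)) {
        b->length_segments += b->next->length_segments;
        b->next = b->next->next;
      }
    }

    int main() {
      // Carve two touching blocks out of one buffer.
      alignas(Block) static char buf[4 * kSegmentSize];
      Block* lo = (Block*)buf;
      Block* hi = (Block*)(buf + 2 * kSegmentSize);
      lo->length_segments = 2; lo->next = hi;
      hi->length_segments = 2; hi->next = NULL;
      merge_with_next(lo);
      assert(lo->length_segments == 4 && lo->next == NULL);
      return 0;
    }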
 416 
 417 /**
 418  * Search the freelist for the first block that is large enough (first fit).
 419  * @return NULL if none was found
 420  */
 421 FreeBlock* CodeHeap::search_freelist(size_t length) {
 422   FreeBlock* found_block = NULL;
 423   FreeBlock* found_prev  = NULL;
 424   size_t     found_length = 0;
 425 
 426   FreeBlock* prev = NULL;
 427   FreeBlock* cur = _freelist;

 428 
 429   // Search for first block that fits
 430   while(cur != NULL) {
 431     if (cur->length() >= length) {
 432       // Remember block, its previous element, and its length
 433       found_block = cur;
 434       found_prev  = prev;
 435       found_length = found_block->length();
 436 
 437       break;
 438     }
 439     // Next element in list
 440     prev = cur;
 441     cur  = cur->link();
 442   }
 443 
 444   if (found_block == NULL) {
 445     // None found
 446     return NULL;
 447   }
 448 
 449   // Exact (or at least good enough) fit. Remove from list.
 450   // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
 451   if (found_length - length < CodeCacheMinBlockLength) {

