#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = 0xFF;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == 0xFF) i = 1;
  }
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  ...
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}


void CodeHeap::release() {
  Unimplemented();
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}


void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}


void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy the request from the freelist
  debug_only(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  debug_only(if (VerifyCodeCacheOften) verify());
  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  if (number_of_segments < CodeCacheMinBlockLength) {
    number_of_segments = CodeCacheMinBlockLength;
  }

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return b->allocated_space();
  } else {
    return NULL;
  }
}


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}


void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    return NULL;
  }
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}


size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Finds the next free heapblock. If the current one is free, it is returned.
void* CodeHeap::next_free(HeapBlock *b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock *CodeHeap::block_start(void *q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the next HeapBlock, or NULL if b is the last block in the heap
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the unallocated heap space
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}

void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for the right place to put b into the list. The list
  // is sorted by increasing addresses.
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }

  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}

// Search freelist for an entry on the list with the best fit
// Return NULL if none was found
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;

  // Search for the smallest block which is big enough
  FreeBlock *prev = NULL;
  FreeBlock *cur  = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {

      // Non critical allocations are not allowed to use the last part of the code heap.
      if (!is_critical) {
        // Make sure the end of the allocation doesn't cross into the last part of the code heap
        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          // the freelist is sorted by address - if one fails, all consecutive will also fail.
          break;
        }
      }

      // Remember best block, its previous element, and its length
      best_block  = cur;
      best_prev   = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unmap element
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _freelist_segments -= length;
  return best_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

#endif

void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that freelist contains the right amount of free space
  // guarantee(len == _freelist_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks that are
  // found on the full list.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  // guarantee(count == 0, "missing free blocks");
}

//----------------------------------------------------------------------------
// Revised version of heap.cpp
//----------------------------------------------------------------------------

#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = free_sentinel;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  ...
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}

void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy the request from the freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    return b->allocated_space();
  } else {
    return NULL;
  }
}


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * Uses the segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache          segmap
 *         -----------        ---------
 * seg 1   |  nm 1   |   ->   |  0   |
 * seg 2   |  nm 1   |   ->   |  1   |
 * ...     |  nm 1   |   ->   |  ..  |
 * seg m   |  nm 2   |   ->   |  0   |
 * seg m+1 |  nm 2   |   ->   |  1   |
 * ...     |  nm 2   |   ->   |  2   |
 * ...     |  nm 2   |   ->   |  ..  |
 * ...     |  nm 2   |   ->   | 0xFE |
 * seg m+n |  nm 2   |   ->   |  1   |
 * ...     |  nm 2   |   ->   |      |
 *
 * A value of '0' in the segmap indicates that this segment contains the beginning of
 * an nmethod. Let's walk through a simple example: If we want to find the start of
 * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
 * is an offset that points to the segment that contains the start of the nmethod.
 * Another example: If we want to get the start of nm 2, and we happen to get a pointer
 * that points to seg m+n, we first read segmap[m+n], which returns '1'. So we have to
 * do one more read of segmap[m+n-1] to finally get the segment header.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}


size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Finds the next free heapblock. If the current one is free, it is returned.
void* CodeHeap::next_free(HeapBlock* b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the next HeapBlock, or NULL if b is the last block in the heap
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the unallocated heap space
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    _freelist_length--;
    return true;
  }
  return false;
}


void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  _freelist_length++;

  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Since the freelist is sorted by increasing addresses, if the element we want
  // to insert has a smaller address than the first element, we can simply add 'b'
  // as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for the right place to put b into the list. The list
  // is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}

/**
 * Search freelist for an entry that fits (the search stops at the first fit).
 * @return NULL, if none was found
 */
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock* found_block  = NULL;
  FreeBlock* found_prev   = NULL;
  size_t     found_length = 0;

  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;

  // Search for first block that fits
  while(cur != NULL) {
    if (cur->length() >= length) {
      // Non critical allocations are not allowed to use the last part of the code heap.
      // Make sure the end of the allocation doesn't cross into the last part of the code heap.
      if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
        // The freelist is sorted by address - if one fails, all consecutive will also fail.
        break;
      }

      // Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = found_block->length();

      break;
    }
    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unmap element
      found_prev->set_link(found_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    found_block->set_length(found_length - length);
    found_block = following_block(found_block);
    // Set used bit and length on new block
    size_t beg = segment_for(found_block);
    mark_segmap_as_used(beg, beg + length);
    found_block->set_length(length);
  }

  found_block->set_used();
  _freelist_segments -= length;
  return found_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as free blocks found on the full list.
    assert(count == 0, "missing free blocks");

    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }
}

#endif
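
//----------------------------------------------------------------------------
// A standalone sketch, not part of heap.cpp: it models the segment-map
// encoding documented above find_start(), using small, made-up segment counts.
// Only the 0xFF free marker, the offset encoding that wraps before 0xFF, and
// the backward walk mirror the real mark_segmap_as_used()/find_start() logic;
// all names below are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static const uint8_t kFreeSentinel = 0xFF;  // corresponds to the 0xFF / free_sentinel marker

// Mark [beg, end) as one used block: each entry stores the distance back to
// the block start, restarting at 1 before reaching the sentinel value,
// just like mark_segmap_as_used().
static void mark_used(uint8_t* segmap, size_t beg, size_t end) {
  int i = 0;
  for (size_t s = beg; s < end; s++) {
    segmap[s] = (uint8_t)i++;
    if (i == kFreeSentinel) i = 1;
  }
}

// Walk backwards until an entry of 0 is found, i.e. the segment holding the
// block header, as find_start() does for a pointer into the code heap.
static size_t find_block_start(const uint8_t* segmap, size_t seg) {
  while (segmap[seg] > 0) {
    seg -= segmap[seg];
  }
  return seg;
}

int main() {
  uint8_t segmap[16];
  for (size_t s = 0; s < 16; s++) segmap[s] = kFreeSentinel;  // everything free
  mark_used(segmap, 0, 3);   // "nm 1" occupies segments 0..2
  mark_used(segmap, 3, 10);  // "nm 2" occupies segments 3..9
  // A pointer into segment 7 belongs to the block that starts at segment 3.
  printf("segment 7 belongs to the block starting at segment %zu\n",
         find_block_start(segmap, 7));
  return 0;
}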