src/hotspot/share/memory/heap.cpp

 187 
 188 static size_t align_to_page_size(size_t size) {
 189   const size_t alignment = (size_t)os::vm_page_size();
 190   assert(is_power_of_2(alignment), "no kidding ???");
 191   return (size + alignment - 1) & ~(alignment - 1);
 192 }
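
The masking expression in align_to_page_size() rounds size up to the next multiple of a power-of-two alignment: adding alignment - 1 carries into the next multiple, and masking with ~(alignment - 1) clears the low bits. A minimal standalone sketch of the same trick, using a hypothetical 4 KiB page size (not part of this patch):

    #include <cassert>
    #include <cstddef>

    // Same round-up-to-power-of-two trick as align_to_page_size() above.
    static std::size_t round_up_pow2(std::size_t size, std::size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const std::size_t page = 4096;   // hypothetical page size
      assert(round_up_pow2(0,    page) == 0);
      assert(round_up_pow2(1,    page) == 4096);
      assert(round_up_pow2(4096, page) == 4096);
      assert(round_up_pow2(4097, page) == 8192);
      return 0;
    }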
 193 
 194 
 195 void CodeHeap::on_code_mapping(char* base, size_t size) {
 196 #ifdef LINUX
 197   extern void linux_wrap_code(char* base, size_t size);
 198   linux_wrap_code(base, size);
 199 #endif
 200 }
 201 
 202 
 203 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
 204   assert(rs.size() >= committed_size, "reserved < committed");
 205   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 206   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

 207 
 208   _segment_size      = segment_size;
 209   _log2_segment_size = exact_log2(segment_size);
 210 
 211   // Reserve and initialize space for _memory.
 212   size_t page_size = os::vm_page_size();
 213   if (os::can_execute_large_page_memory()) {
 214     const size_t min_pages = 8;
 215     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 216                      os::page_size_for_region_aligned(rs.size(), min_pages));
 217   }
 218 
 219   const size_t granularity = os::vm_allocation_granularity();
 220   const size_t c_size = align_up(committed_size, page_size);
 221 
 222   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 223                        rs.base(), rs.size());
 224   if (!_memory.initialize(rs, c_size)) {
 225     return false;
 226   }


 235 
 236   // reserve space for _segmap
 237   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 238     return false;
 239   }
 240 
 241   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 242 
 243   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 244   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 245   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 246 
 247   // initialize remaining instance variables, heap memory and segmap
 248   clear();
 249   init_segmap_template();
 250   return true;
 251 }
 252 
 253 
 254 bool CodeHeap::expand_by(size_t size) {


 255   // expand _memory space
 256   size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
 257   if (dm > 0) {
 258     // Use at least the available uncommitted space if 'size' is larger
 259     if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
 260       dm = _memory.uncommitted_size();
 261     }
 262     char* base = _memory.low() + _memory.committed_size();
 263     if (!_memory.expand_by(dm)) return false;
 264     on_code_mapping(base, dm);
 265     size_t i = _number_of_committed_segments;
 266     _number_of_committed_segments = size_to_segments(_memory.committed_size());
 267     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 268     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 269     // expand _segmap space
 270     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 271     if ((ds > 0) && !_segmap.expand_by(ds)) {
 272       return false;
 273     }
 274     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 275     // initialize additional space (heap memory and segmap)
 276     clear(i, _number_of_committed_segments);
 277   }
 278   return true;
 279 }
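
A worked instance of the commit arithmetic above (illustrative numbers only): with a hypothetical 4 KiB page size, 64 KiB already committed, and size == 10 KiB, align_to_page_size(74 KiB) == 76 KiB, so dm == 12 KiB and the heap grows by whole pages. If less than dm of the reservation remains uncommitted, dm is first clamped to the remaining uncommitted size.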
 280 
 281 
 282 void* CodeHeap::allocate(size_t instance_size) {
 283   size_t number_of_segments = size_to_segments(instance_size + header_size());
 284   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

 285 
 286   // First check if we can satisfy request from freelist
 287   NOT_PRODUCT(verify());
 288   HeapBlock* block = search_freelist(number_of_segments);
 289   NOT_PRODUCT(verify());
 290 
 291   if (block != NULL) {
 292     assert(!block->free(), "must not be marked free");
 293     guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
 294               "The newly allocated block " INTPTR_FORMAT " is not within the heap "
 295               "starting with "  INTPTR_FORMAT " and ending with "  INTPTR_FORMAT,
 296               p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
 297     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 298     _blob_count++;
 299     return block->allocated_space();
 300   }
 301 
 302   // Ensure minimum size for allocation to the heap.
 303   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 304 


 329 //          where the split happens. The segment with relative
 330 //          number split_at is the first segment of the split-off block.
 331 HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
 332   if (b == NULL) return NULL;
 333   // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
 334   assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
 335          "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
 336   size_t split_segment = segment_for(b) + split_at;
 337   size_t b_size        = b->length();
 338   size_t newb_size     = b_size - split_at;
 339 
 340   HeapBlock* newb = block_at(split_segment);
 341   newb->set_length(newb_size);
 342   mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
 343   b->set_length(split_at);
 344   return newb;
 345 }
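
A worked example of the split arithmetic (illustrative numbers only): for a block b starting at segment 100 with b->length() == 10 and split_at == 4, split_segment == 104; b is trimmed to segments 100..103, and the returned block newb covers segments 104..109 with length 6, whose segment map entries are re-marked relative to the new block start.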
 346 
 347 void CodeHeap::deallocate_tail(void* p, size_t used_size) {
 348   assert(p == find_start(p), "illegal deallocation");


 349   // Find start of HeapBlock
 350   HeapBlock* b = (((HeapBlock *)p) - 1);
 351   assert(b->allocated_space() == p, "sanity check");
 352 
 353   size_t actual_number_of_segments = b->length();
 354   size_t used_number_of_segments   = size_to_segments(used_size + header_size());
 355   size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
 356   guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
 357 
 358   HeapBlock* f = split_block(b, used_number_of_segments);
 359   add_to_freelist(f);
 360   NOT_PRODUCT(verify());
 361 }
 362 
 363 void CodeHeap::deallocate(void* p) {
 364   assert(p == find_start(p), "illegal deallocation");


 365   // Find start of HeapBlock
 366   HeapBlock* b = (((HeapBlock *)p) - 1);
 367   assert(b->allocated_space() == p, "sanity check");
 368   guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
 369             "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
 370             "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
 371             p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
 372   add_to_freelist(b);
 373   NOT_PRODUCT(verify());
 374 }
 375 
 376 /**
 377  * The segment map is used to quickly find the start (header) of a
 378  * code block (e.g. nmethod) when only a pointer to a location inside the
 379  * code block is known. This works as follows:
 380  *  - The storage reserved for the code heap is divided into 'segments'.
 381  *  - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 382  *  - The size must be a power of two to allow the use of shift operations
 383  *    to quickly convert between segment index and segment address.
 384  *  - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.

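The shift-based index/address conversion the comment refers to can be sketched as follows. This is an illustration only, assuming a power-of-two segment size and names modeled on this file (the heap's low boundary plus _log2_segment_size); it is not part of the patch:

    #include <cstddef>

    // Sketch: converting between a code address and its segment index
    // with shifts, as the segment map comment above describes.
    struct SegmapSketch {
      char* low;                // heap low boundary (hypothetical)
      int   log2_segment_size;  // e.g. 6 for 64-byte segments

      std::size_t segment_for(const void* p) const {
        return static_cast<std::size_t>((const char*)p - low) >> log2_segment_size;
      }
      char* address_for(std::size_t index) const {
        return low + (index << log2_segment_size);
      }
    };
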

 772     while (p[ix] > 0) {
 773       ix -= p[ix];
 774       nhops++;
 775     }
 776     return (nhops > hops_expected) ? nhops - hops_expected : 0;
 777   }
 778   return 0;
 779 }
 780 
 781 //----------------------------------------------------------------------------
 782 // Non-product code
 783 
 784 #ifndef PRODUCT
 785 
 786 void CodeHeap::print() {
 787   tty->print_cr("The Heap");
 788 }
 789 
 790 void CodeHeap::verify() {
 791   if (VerifyCodeCache) {

 792     size_t len = 0;
 793     int count = 0;
 794     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
 795       len += b->length();
 796       count++;
 797       // Check if we have merged all free blocks
 798       assert(merge_right(b) == false, "Missed merging opportunity");
 799     }
 800     // Verify that freelist contains the right amount of free space
 801     assert(len == _freelist_segments, "wrong freelist");
 802 
 803     for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
 804       if (h->free()) count--;
 805     }
 806     // Verify that the freelist contains the same number of blocks
 807     // as free blocks found on the full list.
 808     assert(count == 0, "missing free blocks");
 809 
 810     //---<  all free block memory must have been invalidated  >---
 811     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {


--- The same hunks in the updated file: identical except that assert_locked_or_safepoint(CodeCache_lock) is now asserted on entry to reserve(), expand_by(), allocate(), deallocate_tail(), deallocate(), and verify(); line numbers shift accordingly. ---

 187 
 188 static size_t align_to_page_size(size_t size) {
 189   const size_t alignment = (size_t)os::vm_page_size();
 190   assert(is_power_of_2(alignment), "no kidding ???");
 191   return (size + alignment - 1) & ~(alignment - 1);
 192 }
 193 
 194 
 195 void CodeHeap::on_code_mapping(char* base, size_t size) {
 196 #ifdef LINUX
 197   extern void linux_wrap_code(char* base, size_t size);
 198   linux_wrap_code(base, size);
 199 #endif
 200 }
 201 
 202 
 203 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
 204   assert(rs.size() >= committed_size, "reserved < committed");
 205   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 206   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 207   assert_locked_or_safepoint(CodeCache_lock);
 208 
 209   _segment_size      = segment_size;
 210   _log2_segment_size = exact_log2(segment_size);
 211 
 212   // Reserve and initialize space for _memory.
 213   size_t page_size = os::vm_page_size();
 214   if (os::can_execute_large_page_memory()) {
 215     const size_t min_pages = 8;
 216     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 217                      os::page_size_for_region_aligned(rs.size(), min_pages));
 218   }
 219 
 220   const size_t granularity = os::vm_allocation_granularity();
 221   const size_t c_size = align_up(committed_size, page_size);
 222 
 223   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 224                        rs.base(), rs.size());
 225   if (!_memory.initialize(rs, c_size)) {
 226     return false;
 227   }


 236 
 237   // reserve space for _segmap
 238   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 239     return false;
 240   }
 241 
 242   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 243 
 244   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 245   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 246   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 247 
 248   // initialize remaining instance variables, heap memory and segmap
 249   clear();
 250   init_segmap_template();
 251   return true;
 252 }
 253 
 254 
 255 bool CodeHeap::expand_by(size_t size) {
 256   assert_locked_or_safepoint(CodeCache_lock);
 257 
 258   // expand _memory space
 259   size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
 260   if (dm > 0) {
 261     // Use at least the available uncommitted space if 'size' is larger
 262     if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
 263       dm = _memory.uncommitted_size();
 264     }
 265     char* base = _memory.low() + _memory.committed_size();
 266     if (!_memory.expand_by(dm)) return false;
 267     on_code_mapping(base, dm);
 268     size_t i = _number_of_committed_segments;
 269     _number_of_committed_segments = size_to_segments(_memory.committed_size());
 270     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 271     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 272     // expand _segmap space
 273     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 274     if ((ds > 0) && !_segmap.expand_by(ds)) {
 275       return false;
 276     }
 277     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 278     // initialize additional space (heap memory and segmap)
 279     clear(i, _number_of_committed_segments);
 280   }
 281   return true;
 282 }
 283 
 284 
 285 void* CodeHeap::allocate(size_t instance_size) {
 286   size_t number_of_segments = size_to_segments(instance_size + header_size());
 287   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 288   assert_locked_or_safepoint(CodeCache_lock);
 289 
 290   // First check if we can satisfy request from freelist
 291   NOT_PRODUCT(verify());
 292   HeapBlock* block = search_freelist(number_of_segments);
 293   NOT_PRODUCT(verify());
 294 
 295   if (block != NULL) {
 296     assert(!block->free(), "must not be marked free");
 297     guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
 298               "The newly allocated block " INTPTR_FORMAT " is not within the heap "
 299               "starting with "  INTPTR_FORMAT " and ending with "  INTPTR_FORMAT,
 300               p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
 301     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 302     _blob_count++;
 303     return block->allocated_space();
 304   }
 305 
 306   // Ensure minimum size for allocation to the heap.
 307   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 308 


 333 //          where the split happens. The segment with relative
 334 //          number split_at is the first segment of the split-off block.
 335 HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
 336   if (b == NULL) return NULL;
 337   // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
 338   assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
 339          "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
 340   size_t split_segment = segment_for(b) + split_at;
 341   size_t b_size        = b->length();
 342   size_t newb_size     = b_size - split_at;
 343 
 344   HeapBlock* newb = block_at(split_segment);
 345   newb->set_length(newb_size);
 346   mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
 347   b->set_length(split_at);
 348   return newb;
 349 }
 350 
 351 void CodeHeap::deallocate_tail(void* p, size_t used_size) {
 352   assert(p == find_start(p), "illegal deallocation");
 353   assert_locked_or_safepoint(CodeCache_lock);
 354 
 355   // Find start of HeapBlock
 356   HeapBlock* b = (((HeapBlock *)p) - 1);
 357   assert(b->allocated_space() == p, "sanity check");
 358 
 359   size_t actual_number_of_segments = b->length();
 360   size_t used_number_of_segments   = size_to_segments(used_size + header_size());
 361   size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
 362   guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
 363 
 364   HeapBlock* f = split_block(b, used_number_of_segments);
 365   add_to_freelist(f);
 366   NOT_PRODUCT(verify());
 367 }
 368 
 369 void CodeHeap::deallocate(void* p) {
 370   assert(p == find_start(p), "illegal deallocation");
 371   assert_locked_or_safepoint(CodeCache_lock);
 372 
 373   // Find start of HeapBlock
 374   HeapBlock* b = (((HeapBlock *)p) - 1);
 375   assert(b->allocated_space() == p, "sanity check");
 376   guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
 377             "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
 378             "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
 379             p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
 380   add_to_freelist(b);
 381   NOT_PRODUCT(verify());
 382 }
 383 
 384 /**
 385  * The segment map is used to quickly find the start (header) of a
 386  * code block (e.g. nmethod) when only a pointer to a location inside the
 387  * code block is known. This works as follows:
 388  *  - The storage reserved for the code heap is divided into 'segments'.
 389  *  - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 390  *  - The size must be a power of two to allow the use of shift operations
 391  *    to quickly convert between segment index and segment address.
 392  *  - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.


 780     while (p[ix] > 0) {
 781       ix -= p[ix];
 782       nhops++;
 783     }
 784     return (nhops > hops_expected) ? nhops - hops_expected : 0;
 785   }
 786   return 0;
 787 }
 788 
 789 //----------------------------------------------------------------------------
 790 // Non-product code
 791 
 792 #ifndef PRODUCT
 793 
 794 void CodeHeap::print() {
 795   tty->print_cr("The Heap");
 796 }
 797 
 798 void CodeHeap::verify() {
 799   if (VerifyCodeCache) {
 800     assert_locked_or_safepoint(CodeCache_lock);
 801     size_t len = 0;
 802     int count = 0;
 803     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
 804       len += b->length();
 805       count++;
 806       // Check if we have merged all free blocks
 807       assert(merge_right(b) == false, "Missed merging opportunity");
 808     }
 809     // Verify that freelist contains the right amount of free space
 810     assert(len == _freelist_segments, "wrong freelist");
 811 
 812     for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
 813       if (h->free()) count--;
 814     }
 815     // Verify that the freelist contains the same number of blocks
 816     // as free blocks found on the full list.
 817     assert(count == 0, "missing free blocks");
 818 
 819     //---<  all free block memory must have been invalidated  >---
 820     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {

