
src/hotspot/share/memory/heap.cpp


--- Old version ---
 188 
 189 static size_t align_to_page_size(size_t size) {
 190   const size_t alignment = (size_t)os::vm_page_size();
 191   assert(is_power_of_2(alignment), "no kidding ???");
 192   return (size + alignment - 1) & ~(alignment - 1);
 193 }
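
A minimal standalone sketch of the same round-up trick, assuming a 4 KiB page
size purely for illustration: adding (alignment - 1) carries any partial page
over, and the mask clears the low bits. round_up_pow2 is a hypothetical
stand-in for align_to_page_size above.

    #include <cassert>
    #include <cstddef>

    static size_t round_up_pow2(size_t size, size_t alignment) {
      // valid only when alignment is a power of two
      assert((alignment & (alignment - 1)) == 0);
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(round_up_pow2(0,    4096) == 0);     // already aligned
      assert(round_up_pow2(1,    4096) == 4096);  // rounds up to one page
      assert(round_up_pow2(4096, 4096) == 4096);  // exact multiple unchanged
      assert(round_up_pow2(4097, 4096) == 8192);  // spills into a second page
      return 0;
    }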
 194 
 195 
 196 void CodeHeap::on_code_mapping(char* base, size_t size) {
 197 #ifdef LINUX
 198   extern void linux_wrap_code(char* base, size_t size);
 199   linux_wrap_code(base, size);
 200 #endif
 201 }
 202 
 203 
 204 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
 205   assert(rs.size() >= committed_size, "reserved < committed");
 206   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 207   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

 208 
 209   _segment_size      = segment_size;
 210   _log2_segment_size = exact_log2(segment_size);
 211 
 212   // Reserve and initialize space for _memory.
 213   size_t page_size = os::vm_page_size();
 214   if (os::can_execute_large_page_memory()) {
 215     const size_t min_pages = 8;
 216     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 217                      os::page_size_for_region_aligned(rs.size(), min_pages));
 218   }
 219 
 220   const size_t granularity = os::vm_allocation_granularity();
 221   const size_t c_size = align_up(committed_size, page_size);
 222 
 223   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 224                        rs.base(), rs.size());
 225   if (!_memory.initialize(rs, c_size)) {
 226     return false;
 227   }


 236 
 237   // reserve space for _segmap
 238   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 239     return false;
 240   }
 241 
 242   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 243 
 244   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 245   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 246   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 247 
 248   // initialize remaining instance variables, heap memory and segmap
 249   clear();
 250   init_segmap_template();
 251   return true;
 252 }
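
For intuition only, a hedged sketch of the page-size clamping idea in
reserve(): choose the largest supported page size that still gives the region
at least min_pages pages, otherwise fall back to the base page. The helper
below is hypothetical and only approximates what
os::page_size_for_region_aligned() does.

    #include <cassert>
    #include <cstddef>

    static size_t page_size_for_region_sketch(size_t region, size_t min_pages,
                                              const size_t* pages, int n) {
      // 'pages' is assumed sorted from largest to smallest supported size
      for (int i = 0; i < n; i++) {
        if (pages[i] * min_pages <= region && region % pages[i] == 0) {
          return pages[i];
        }
      }
      return pages[n - 1];  // smallest (base) page size
    }

    int main() {
      const size_t pages[] = {2u * 1024 * 1024, 4096};  // 2 MiB large, 4 KiB base
      assert(page_size_for_region_sketch(64u * 1024 * 1024, 8, pages, 2) == 2u * 1024 * 1024);
      assert(page_size_for_region_sketch( 1u * 1024 * 1024, 8, pages, 2) == 4096);
      return 0;
    }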
 253 
 254 
 255 bool CodeHeap::expand_by(size_t size) {


 256   // expand _memory space
 257   size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
 258   if (dm > 0) {
 259     // Use at least the available uncommitted space if 'size' is larger
 260     if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
 261       dm = _memory.uncommitted_size();
 262     }
 263     char* base = _memory.low() + _memory.committed_size();
 264     if (!_memory.expand_by(dm)) return false;
 265     on_code_mapping(base, dm);
 266     size_t i = _number_of_committed_segments;
 267     _number_of_committed_segments = size_to_segments(_memory.committed_size());
 268     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 269     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 270     // expand _segmap space
 271     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 272     if ((ds > 0) && !_segmap.expand_by(ds)) {
 273       return false;
 274     }
 275     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 276     // initialize additional space (heap memory and segmap)
 277     clear(i, _number_of_committed_segments);
 278   }
 279   return true;
 280 }
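
The delta arithmetic at the top of expand_by() rounds the new total up to a
page boundary and commits only the difference. A small worked sketch, assuming
4 KiB pages and illustrative sizes:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t page      = 4096;
      const size_t committed = 16 * page;  // 64 KiB currently committed
      const size_t request   = 1000;       // caller asks for 1000 more bytes

      // same computation as align_to_page_size(committed + request) - committed
      size_t total = committed + request;
      size_t dm    = ((total + page - 1) & ~(page - 1)) - committed;
      assert(dm == page);                  // exactly one extra page gets committed
      return 0;
    }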
 281 
 282 
 283 void* CodeHeap::allocate(size_t instance_size) {
 284   size_t number_of_segments = size_to_segments(instance_size + header_size());
 285   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

 286 
 287   // First check if we can satisfy request from freelist
 288   NOT_PRODUCT(verify());
 289   HeapBlock* block = search_freelist(number_of_segments);
 290   NOT_PRODUCT(verify());
 291 
 292   if (block != NULL) {
 293     assert(!block->free(), "must not be marked free");
 294     guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
 295               "The newly allocated block " INTPTR_FORMAT " is not within the heap "
 296               "starting with "  INTPTR_FORMAT " and ending with "  INTPTR_FORMAT,
 297               p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
 298     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 299     _blob_count++;
 300     return block->allocated_space();
 301   }
 302 
 303   // Ensure minimum size for allocation to the heap.
 304   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 305 


 330 //          where the split happens. The segment with relative
 331 //          number split_at is the first segment of the split-off block.
 332 HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
 333   if (b == NULL) return NULL;
 334   // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
 335   assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
 336          "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
 337   size_t split_segment = segment_for(b) + split_at;
 338   size_t b_size        = b->length();
 339   size_t newb_size     = b_size - split_at;
 340 
 341   HeapBlock* newb = block_at(split_segment);
 342   newb->set_length(newb_size);
 343   mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
 344   b->set_length(split_at);
 345   return newb;
 346 }
 347 
 348 void CodeHeap::deallocate_tail(void* p, size_t used_size) {
 349   assert(p == find_start(p), "illegal deallocation");


 350   // Find start of HeapBlock
 351   HeapBlock* b = (((HeapBlock *)p) - 1);
 352   assert(b->allocated_space() == p, "sanity check");
 353 
 354   size_t actual_number_of_segments = b->length();
 355   size_t used_number_of_segments   = size_to_segments(used_size + header_size());
 356   size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
 357   guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
 358 
 359   HeapBlock* f = split_block(b, used_number_of_segments);
 360   add_to_freelist(f);
 361   NOT_PRODUCT(verify());
 362 }
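
The segment arithmetic in deallocate_tail() and split_block() amounts to
trimming a tail. With hypothetical counts (the real ones come from
size_to_segments()): a 10-segment block whose contents need only 6 segments is
split at relative segment 6, and the 4-segment tail is returned to the
freelist.

    #include <cassert>
    #include <cstddef>

    int main() {
      size_t actual_segments = 10;  // b->length()
      size_t used_segments   = 6;   // size_to_segments(used_size + header_size())
      assert(used_segments <= actual_segments);

      // split_block(b, used_segments): the split-off tail starts used_segments in
      size_t tail_segments = actual_segments - used_segments;
      assert(tail_segments == 4);   // this much goes back on the freelist
      assert(used_segments + tail_segments == actual_segments);
      return 0;
    }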
 363 
 364 void CodeHeap::deallocate(void* p) {
 365   assert(p == find_start(p), "illegal deallocation");


 366   // Find start of HeapBlock
 367   HeapBlock* b = (((HeapBlock *)p) - 1);
 368   assert(b->allocated_space() == p, "sanity check");
 369   guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
 370             "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
 371             "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
 372             p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
 373   add_to_freelist(b);
 374   NOT_PRODUCT(verify());
 375 }
 376 
 377 /**
 378  * The segment map is used to quickly find the start (header) of a
 379  * code block (e.g. nmethod) when only a pointer to a location inside the
 380  * code block is known. This works as follows:
 381  *  - The storage reserved for the code heap is divided into 'segments'.
 382  *  - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 383  *  - The size must be a power of two to allow the use of shift operations
 384  *    to quickly convert between segment index and segment address.
 385  *  - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.


 773     while (p[ix] > 0) {
 774       ix -= p[ix];
 775       nhops++;
 776     }
 777     return (nhops > hops_expected) ? nhops - hops_expected : 0;
 778   }
 779   return 0;
 780 }
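
For intuition about the hop loop above: a zero segmap entry marks a block's
first segment, and a positive entry says how far to hop back toward it. A
minimal sketch with a hypothetical map (real entries saturate at one byte and
chain over longer distances):

    #include <cassert>

    int main() {
      // Hypothetical map: one block covers segments 0..2, another 3..7.
      unsigned char segmap[8] = {0, 1, 2, 0, 1, 2, 3, 4};
      int ix = 7;                // a pointer landed in segment 7
      int nhops = 0;
      while (segmap[ix] > 0) {   // same shape as the loop above
        ix -= segmap[ix];
        nhops++;
      }
      assert(ix == 3 && nhops == 1);  // one hop lands on the block header
      return 0;
    }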
 781 
 782 //----------------------------------------------------------------------------
 783 // Non-product code
 784 
 785 #ifndef PRODUCT
 786 
 787 void CodeHeap::print() {
 788   tty->print_cr("The Heap");
 789 }
 790 
 791 void CodeHeap::verify() {
 792   if (VerifyCodeCache) {

 793     size_t len = 0;
 794     int count = 0;
 795     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
 796       len += b->length();
 797       count++;
 798       // Check if we have merged all free blocks
 799       assert(merge_right(b) == false, "Missed merging opportunity");
 800     }
 801     // Verify that freelist contains the right amount of free space
 802     assert(len == _freelist_segments, "wrong freelist");
 803 
 804     for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
 805       if (h->free()) count--;
 806     }
 807     // Verify that the freelist contains the same number of blocks
 808     // as there are free blocks on the full list.
 809     assert(count == 0, "missing free blocks");
 810 
 811     //---<  all free block memory must have been invalidated  >---
 812     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {




 188 
 189 static size_t align_to_page_size(size_t size) {
 190   const size_t alignment = (size_t)os::vm_page_size();
 191   assert(is_power_of_2(alignment), "no kidding ???");
 192   return (size + alignment - 1) & ~(alignment - 1);
 193 }
 194 
 195 
 196 void CodeHeap::on_code_mapping(char* base, size_t size) {
 197 #ifdef LINUX
 198   extern void linux_wrap_code(char* base, size_t size);
 199   linux_wrap_code(base, size);
 200 #endif
 201 }
 202 
 203 
 204 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
 205   assert(rs.size() >= committed_size, "reserved < committed");
 206   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 207   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 208   assert_locked_or_safepoint(CodeCache_lock);
 209 
 210   _segment_size      = segment_size;
 211   _log2_segment_size = exact_log2(segment_size);
 212 
 213   // Reserve and initialize space for _memory.
 214   size_t page_size = os::vm_page_size();
 215   if (os::can_execute_large_page_memory()) {
 216     const size_t min_pages = 8;
 217     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 218                      os::page_size_for_region_aligned(rs.size(), min_pages));
 219   }
 220 
 221   const size_t granularity = os::vm_allocation_granularity();
 222   const size_t c_size = align_up(committed_size, page_size);
 223 
 224   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 225                        rs.base(), rs.size());
 226   if (!_memory.initialize(rs, c_size)) {
 227     return false;
 228   }


 237 
 238   // reserve space for _segmap
 239   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 240     return false;
 241   }
 242 
 243   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 244 
 245   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 246   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 247   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 248 
 249   // initialize remaining instance variables, heap memory and segmap
 250   clear();
 251   init_segmap_template();
 252   return true;
 253 }
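
The new version threads assert_locked_or_safepoint(CodeCache_lock) through
reserve(), expand_by(), allocate(), deallocate_tail(), deallocate(), and
verify(): each mutating entry point must run either under the lock or at a
safepoint. A hedged sketch of that debug-only pattern, using hypothetical
stand-ins rather than HotSpot's real mutexLocker/SafepointSynchronize
machinery:

    #include <cassert>

    struct Mutex {
      const void* owner = nullptr;  // stand-in for HotSpot's Mutex owner tracking
      bool owned_by_self(const void* self) const { return owner == self; }
    };

    static bool s_at_safepoint = false;  // stand-in for SafepointSynchronize::is_at_safepoint()

    static void assert_locked_or_safepoint_sketch(const Mutex& m, const void* self) {
      // mutation is safe either under the lock or with all mutators paused
      assert(m.owned_by_self(self) || s_at_safepoint);
    }

    int main() {
      Mutex code_cache_lock;
      int self = 0;                     // identity token for "this thread"
      code_cache_lock.owner = &self;
      assert_locked_or_safepoint_sketch(code_cache_lock, &self);  // we hold the lock
      return 0;
    }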
 254 
 255 
 256 bool CodeHeap::expand_by(size_t size) {
 257   assert_locked_or_safepoint(CodeCache_lock);
 258 
 259   // expand _memory space
 260   size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
 261   if (dm > 0) {
 262     // Use at least the available uncommitted space if 'size' is larger
 263     if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
 264       dm = _memory.uncommitted_size();
 265     }
 266     char* base = _memory.low() + _memory.committed_size();
 267     if (!_memory.expand_by(dm)) return false;
 268     on_code_mapping(base, dm);
 269     size_t i = _number_of_committed_segments;
 270     _number_of_committed_segments = size_to_segments(_memory.committed_size());
 271     assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
 272     assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 273     // expand _segmap space
 274     size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
 275     if ((ds > 0) && !_segmap.expand_by(ds)) {
 276       return false;
 277     }
 278     assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
 279     // initialize additional space (heap memory and segmap)
 280     clear(i, _number_of_committed_segments);
 281   }
 282   return true;
 283 }
 284 
 285 
 286 void* CodeHeap::allocate(size_t instance_size) {
 287   size_t number_of_segments = size_to_segments(instance_size + header_size());
 288   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 289   assert_locked_or_safepoint(CodeCache_lock);
 290 
 291   // First check if we can satisfy request from freelist
 292   NOT_PRODUCT(verify());
 293   HeapBlock* block = search_freelist(number_of_segments);
 294   NOT_PRODUCT(verify());
 295 
 296   if (block != NULL) {
 297     assert(!block->free(), "must not be marked free");
 298     guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
 299               "The newly allocated block " INTPTR_FORMAT " is not within the heap "
 300               "starting with "  INTPTR_FORMAT " and ending with "  INTPTR_FORMAT,
 301               p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
 302     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 303     _blob_count++;
 304     return block->allocated_space();
 305   }
 306 
 307   // Ensure minimum size for allocation to the heap.
 308   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 309 


 334 //          where the split happens. The segment with relative
 335 //          number split_at is the first segment of the split-off block.
 336 HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
 337   if (b == NULL) return NULL;
 338   // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
 339   assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
 340          "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
 341   size_t split_segment = segment_for(b) + split_at;
 342   size_t b_size        = b->length();
 343   size_t newb_size     = b_size - split_at;
 344 
 345   HeapBlock* newb = block_at(split_segment);
 346   newb->set_length(newb_size);
 347   mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
 348   b->set_length(split_at);
 349   return newb;
 350 }
 351 
 352 void CodeHeap::deallocate_tail(void* p, size_t used_size) {
 353   assert(p == find_start(p), "illegal deallocation");
 354   assert_locked_or_safepoint(CodeCache_lock);
 355 
 356   // Find start of HeapBlock
 357   HeapBlock* b = (((HeapBlock *)p) - 1);
 358   assert(b->allocated_space() == p, "sanity check");
 359 
 360   size_t actual_number_of_segments = b->length();
 361   size_t used_number_of_segments   = size_to_segments(used_size + header_size());
 362   size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
 363   guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
 364 
 365   HeapBlock* f = split_block(b, used_number_of_segments);
 366   add_to_freelist(f);
 367   NOT_PRODUCT(verify());
 368 }
 369 
 370 void CodeHeap::deallocate(void* p) {
 371   assert(p == find_start(p), "illegal deallocation");
 372   assert_locked_or_safepoint(CodeCache_lock);
 373 
 374   // Find start of HeapBlock
 375   HeapBlock* b = (((HeapBlock *)p) - 1);
 376   assert(b->allocated_space() == p, "sanity check");
 377   guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
 378             "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
 379             "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
 380             p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
 381   add_to_freelist(b);
 382   NOT_PRODUCT(verify());
 383 }
 384 
 385 /**
 386  * The segment map is used to quickly find the start (header) of a
 387  * code block (e.g. nmethod) when only a pointer to a location inside the
 388  * code block is known. This works as follows:
 389  *  - The storage reserved for the code heap is divided into 'segments'.
 390  *  - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 391  *  - The size must be a power of two to allow the use of shift operations
 392  *    to quickly convert between segment index and segment address.
 393  *  - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.


 781     while (p[ix] > 0) {
 782       ix -= p[ix];
 783       nhops++;
 784     }
 785     return (nhops > hops_expected) ? nhops - hops_expected : 0;
 786   }
 787   return 0;
 788 }
 789 
 790 //----------------------------------------------------------------------------
 791 // Non-product code
 792 
 793 #ifndef PRODUCT
 794 
 795 void CodeHeap::print() {
 796   tty->print_cr("The Heap");
 797 }
 798 
 799 void CodeHeap::verify() {
 800   if (VerifyCodeCache) {
 801     assert_locked_or_safepoint(CodeCache_lock);
 802     size_t len = 0;
 803     int count = 0;
 804     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
 805       len += b->length();
 806       count++;
 807       // Check if we have merged all free blocks
 808       assert(merge_right(b) == false, "Missed merging opportunity");
 809     }
 810     // Verify that freelist contains the right amount of free space
 811     assert(len == _freelist_segments, "wrong freelist");
 812 
 813     for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
 814       if (h->free()) count--;
 815     }
 816     // Verify that the freelist contains the same number of blocks
 817     // as there are free blocks on the full list.
 818     assert(count == 0, "missing free blocks");
 819 
 820     //---<  all free block memory must have been invalidated  >---
 821     for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {

