src/hotspot/os/posix/os_posix.cpp

 279 // rather than unmapping and remapping the whole chunk to get requested alignment.
 280 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
 281   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 282       "Alignment must be a multiple of allocation granularity (page size)");
 283   assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
 284 
 285   size_t extra_size = size + alignment;
 286   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 287 
 288   char* extra_base;
 289   if (file_desc != -1) {
 290     // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
 291     // we need to deal with shrinking of the file space later, when we release the extra memory after alignment.
 292     // We also cannot call os::reserve_memory() with file_desc set to -1, because on AIX we might get SHM memory.
 293     // So here we call a helper function to reserve the memory for us. After we have an aligned base,
 294     // we will replace the anonymous mapping with a file mapping.
 295     extra_base = reserve_mmapped_memory(extra_size, NULL);
 296     if (extra_base != NULL) {
 297       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
 298     }
 299   }
 300   else {
 301     extra_base = os::reserve_memory(extra_size, NULL, alignment);
 302   }
 303 
 304   if (extra_base == NULL) {
 305     return NULL;
 306   }
 307 
 308   // Do manual alignment
 309   char* aligned_base = align_up(extra_base, alignment);
 310 
 311   // [  |                                       |  ]
 312   // ^ extra_base
 313   //    ^ extra_base + begin_offset == aligned_base
 314   //     extra_base + begin_offset + size       ^
 315   //                       extra_base + extra_size ^
 316   // |<>| == begin_offset
 317   //                              end_offset == |<>|
 318   size_t begin_offset = aligned_base - extra_base;
 319   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 320 
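The code above uses the usual over-reserve-and-trim approach: reserve size + alignment bytes, round the base up to the requested alignment, then give back the slack before and after the aligned region. A minimal standalone sketch of that pattern on raw POSIX mmap/munmap (illustrative only; it does none of the NMT bookkeeping and uses none of the HotSpot helpers) could look like this:

// Minimal sketch (not the HotSpot code): over-reserve, align up, trim the slack.
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

static char* reserve_aligned_sketch(size_t size, size_t alignment) {
  // Reserve 'alignment' extra bytes so an aligned block of 'size' must fit inside.
  size_t extra_size = size + alignment;
  char* extra_base = (char*) mmap(NULL, extra_size, PROT_NONE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (extra_base == MAP_FAILED) {
    return NULL;
  }

  // Round the base up to the requested (power-of-two) alignment.
  uintptr_t raw = (uintptr_t) extra_base;
  char* aligned_base = (char*) ((raw + alignment - 1) & ~(uintptr_t) (alignment - 1));

  // Slack before and after the aligned region, as in the diagram above.
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset   = (extra_base + extra_size) - (aligned_base + size);

  // Give the slack back to the OS; only [aligned_base, aligned_base + size) stays mapped.
  if (begin_offset > 0) {
    munmap(extra_base, begin_offset);
  }
  if (end_offset > 0) {
    munmap(aligned_base + size, end_offset);
  }
  return aligned_base;
}

int main() {
  size_t alignment = 2 * 1024 * 1024;   // 2M alignment, a multiple of the page size
  size_t size      = 4 * alignment;     // size must itself be alignment-aligned
  char* p = reserve_aligned_sketch(size, alignment);
  if (p == NULL) {
    perror("mmap");
    return 1;
  }
  printf("aligned base: %p\n", (void*) p);
  munmap(p, size);
  return 0;
}

The trimming works because extra_base is page-aligned and aligned_base is a multiple of alignment (itself a multiple of the allocation granularity), so begin_offset and end_offset are always multiples of the page size and each slice can be unmapped independently.
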




 279 // rather than unmapping and remapping the whole chunk to get requested alignment.
 280 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
 281   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 282       "Alignment must be a multiple of allocation granularity (page size)");
 283   assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
 284 
 285   size_t extra_size = size + alignment;
 286   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 287 
 288   char* extra_base;
 289   if (file_desc != -1) {
 290     // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
 291     // we need to deal with shrinking of the file space later, when we release the extra memory after alignment.
 292     // We also cannot call os::reserve_memory() with file_desc set to -1, because on AIX we might get SHM memory.
 293     // So here we call a helper function to reserve the memory for us. After we have an aligned base,
 294     // we will replace the anonymous mapping with a file mapping.
 295     extra_base = reserve_mmapped_memory(extra_size, NULL);
 296     if (extra_base != NULL) {
 297       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
 298     }
 299   } else {
 300     extra_base = os::reserve_memory(extra_size, NULL, alignment);
 301   }
 302 
 303   if (extra_base == NULL) {
 304     return NULL;
 305   }
 306 
 307   // Do manual alignment
 308   char* aligned_base = align_up(extra_base, alignment);
 309 
 310   // [  |                                       |  ]
 311   // ^ extra_base
 312   //    ^ extra_base + begin_offset == aligned_base
 313   //     extra_base + begin_offset + size       ^
 314   //                       extra_base + extra_size ^
 315   // |<>| == begin_offset
 316   //                              end_offset == |<>|
 317   size_t begin_offset = aligned_base - extra_base;
 318   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 319 
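The comment in the file_desc != -1 branch notes that the anonymous reservation is later replaced with a file mapping. A hedged sketch of how such a replacement can be done on POSIX (the helper name below is made up for illustration and is not the actual HotSpot function) is:

// Illustrative only: map a file on top of an existing aligned anonymous reservation.
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static char* map_file_over_reservation(char* aligned_base, size_t size, int fd) {
  // Ensure the backing file is large enough for the mapping.
  if (ftruncate(fd, (off_t) size) == -1) {
    return NULL;
  }
  // On Linux, MAP_FIXED replaces whatever is currently mapped at aligned_base
  // with the file mapping, without releasing the address range in between.
  char* result = (char*) mmap(aligned_base, size, PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_FIXED, fd, 0);
  return (result == MAP_FAILED) ? NULL : result;
}

Keeping the original reservation in place until the MAP_FIXED mapping lands avoids a window in which another thread could claim the address range.
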

