
src/hotspot/os/bsd/os_bsd.cpp

2333   } else {
2334     return shmdt(base) == 0;
2335   }
2336 }
2337 
2338 size_t os::large_page_size() {
2339   return _large_page_size;
2340 }
2341 
2342 // HugeTLBFS allows application to commit large page memory on demand;
2343 // with SysV SHM the entire memory region must be allocated as shared
2344 // memory.
2345 bool os::can_commit_large_page_memory() {
2346   return UseHugeTLBFS;
2347 }
2348 
2349 bool os::can_execute_large_page_memory() {
2350   return UseHugeTLBFS;
2351 }
2352 
2353 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2354   assert(file_desc >= 0, "file_desc is not valid");
2355   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
2356   if (result != NULL) {
2357     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
2358       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2359     }
2360   }
2361   return result;
2362 }
2363 
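For context, the new overload above anonymously reserves the requested range and then asks replace_existing_mapping_with_file_mapping, whose body is not part of this hunk, to back that range with the given file descriptor. A minimal sketch of that general technique, using a hypothetical remap_over_reservation name and assuming the file has already been sized to at least 'bytes', is:

#include <stddef.h>
#include <sys/mman.h>

// Illustration only, not HotSpot's helper: map 'fd' on top of an
// already-reserved range [base, base + bytes) so the memory becomes
// backed by the file. MAP_FIXED is what lets the file mapping replace
// the existing anonymous reservation in place.
static char* remap_over_reservation(char* base, size_t bytes, int fd) {
  void* p = mmap(base, bytes, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);
  return (p == MAP_FAILED) ? NULL : (char*)p;
}

HotSpot's actual helper may differ in flags and error handling; the sketch only shows why a valid file_desc is asserted before the anonymous reservation is handed over.
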
2364 // Reserve memory at an arbitrary address, only if that area is
2365 // available (and not reserved for something else).
2366 
2367 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2368   const int max_tries = 10;
2369   char* base[max_tries];
2370   size_t size[max_tries];
2371   const size_t gap = 0x000000;
2372 
2373   // Assert only that the size is a multiple of the page size, since
2374   // that's all that mmap requires, and since that's all we really know
2375   // about at this low abstraction level.  If we need higher alignment,
2376   // we can either pass an alignment to this method or verify alignment
2377   // in one of the methods further up the call chain.  See bug 5044738.
2378   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2379 
2380   // Repeatedly allocate blocks until the block is allocated at the
2381   // right spot.
2382 
2383   // Bsd mmap allows caller to pass an address as hint; give it a try first,
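
The excerpt is cut off here, but the comments already state the strategy of pd_attempt_reserve_memory_at: hand requested_addr to mmap as a hint and keep the block only if the kernel actually placed it there, otherwise retry. A standalone sketch of that hint-and-verify step, using a hypothetical try_reserve_at helper rather than HotSpot's own code, might look like:

#include <stddef.h>
#include <sys/mman.h>

// Illustration only: without MAP_FIXED, a non-NULL first argument to mmap is
// just a hint, so the kernel may place the mapping elsewhere. Keep the block
// only if the hint was honored; otherwise unmap it and report failure.
static char* try_reserve_at(char* requested_addr, size_t bytes) {
  void* p = mmap(requested_addr, bytes, PROT_NONE,
                 MAP_PRIVATE | MAP_ANON, -1, 0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  if (p != (void*)requested_addr) {
    // Wrong spot; HotSpot instead records such blocks in base[]/size[]
    // and keeps trying up to max_tries before giving everything back.
    munmap(p, bytes);
    return NULL;
  }
  return (char*)p;
}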

