src/hotspot/os/bsd/os_bsd.cpp

--- old/src/hotspot/os/bsd/os_bsd.cpp

2334   } else {
2335     return shmdt(base) == 0;
2336   }
2337 }
2338 
2339 size_t os::large_page_size() {
2340   return _large_page_size;
2341 }
2342 
2343 // HugeTLBFS allows application to commit large page memory on demand;
2344 // with SysV SHM the entire memory region must be allocated as shared
2345 // memory.
2346 bool os::can_commit_large_page_memory() {
2347   return UseHugeTLBFS;
2348 }
2349 
2350 bool os::can_execute_large_page_memory() {
2351   return UseHugeTLBFS;
2352 }
2353
2354 // Reserve memory at an arbitrary address, only if that area is
2355 // available (and not reserved for something else).
2356 
2357 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2358   const int max_tries = 10;
2359   char* base[max_tries];
2360   size_t size[max_tries];
2361   const size_t gap = 0x000000;
2362 
2363   // Assert only that the size is a multiple of the page size, since
2364   // that's all that mmap requires, and since that's all we really know
2365   // about at this low abstraction level.  If we need higher alignment,
2366   // we can either pass an alignment to this method or verify alignment
2367   // in one of the methods further up the call chain.  See bug 5044738.
2368   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2369 
2370   // Repeatedly allocate blocks until the block is allocated at the
2371   // right spot.
2372 
2373   // Bsd mmap allows caller to pass an address as hint; give it a try first,

+++ new/src/hotspot/os/bsd/os_bsd.cpp

2334   } else {
2335     return shmdt(base) == 0;
2336   }
2337 }
2338 
2339 size_t os::large_page_size() {
2340   return _large_page_size;
2341 }
2342 
2343 // HugeTLBFS allows application to commit large page memory on demand;
2344 // with SysV SHM the entire memory region must be allocated as shared
2345 // memory.
2346 bool os::can_commit_large_page_memory() {
2347   return UseHugeTLBFS;
2348 }
2349 
2350 bool os::can_execute_large_page_memory() {
2351   return UseHugeTLBFS;
2352 }
2353 
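The comment above captures the distinction: with a hugetlbfs-style mapping the VM can reserve address space first and commit pages only when they are needed, whereas a SysV SHM segment is allocated in full when it is created. A rough standalone sketch of that difference (simplified, not HotSpot code; flag spellings and error handling vary by platform):

#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <cstddef>

// Commit-on-demand style: reserve address space first, commit later.
static void* reserve_then_commit(size_t bytes) {
  void* base = mmap(nullptr, bytes, PROT_NONE,        // reservation only, nothing accessible yet
                    MAP_PRIVATE | MAP_ANON, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  // Commit the range only once it is actually needed.
  if (mprotect(base, bytes, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, bytes);
    return nullptr;
  }
  return base;
}

// SysV SHM style: the entire segment is allocated when it is created.
static void* shm_all_up_front(size_t bytes) {
  int id = shmget(IPC_PRIVATE, bytes, IPC_CREAT | 0600);
  if (id == -1) return nullptr;
  void* base = shmat(id, nullptr, 0);   // whole region becomes usable at once
  shmctl(id, IPC_RMID, nullptr);        // mark for removal after last detach
  return (base == (void*)-1) ? nullptr : base;
}
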
2354 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2355   assert(file_desc >= 0, "file_desc is not valid");
2356   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
2357   if (result != NULL) {
2358     if (replace_existing_mapping_with_dax_file_mapping(result, bytes, file_desc) == NULL) {
2359       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2360     }
2361   }
2362   return result;
2363 }
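The new overload reserves the range with the existing two-argument version and then hands it to the file-mapping helper; a NULL result is treated as fatal during initialization, since the heap cannot be placed in the requested location. As a rough illustration of the underlying idea only (not the HotSpot helper; fd is assumed to refer to a file on the target filesystem), a file can be mapped over an already-reserved range by re-mmapping it with MAP_FIXED:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Sketch only: replace the anonymous reservation at [base, base+bytes)
// with a file-backed mapping at the same address. MAP_FIXED maps over
// the existing reservation.
static char* map_file_over_reservation(char* base, size_t bytes, int fd) {
  if (ftruncate(fd, (off_t)bytes) != 0) {   // size the backing file to cover the range
    return nullptr;
  }
  void* addr = mmap(base, bytes, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, fd, 0);
  return (addr == MAP_FAILED) ? nullptr : (char*)addr;
}
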
2364 
2365 // Reserve memory at an arbitrary address, only if that area is
2366 // available (and not reserved for something else).
2367 
2368 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2369   const int max_tries = 10;
2370   char* base[max_tries];
2371   size_t size[max_tries];
2372   const size_t gap = 0x000000;
2373 
2374   // Assert only that the size is a multiple of the page size, since
2375   // that's all that mmap requires, and since that's all we really know
2376   // about at this low abstraction level.  If we need higher alignment,
2377   // we can either pass an alignment to this method or verify alignment
2378   // in one of the methods further up the call chain.  See bug 5044738.
2379   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2380 
2381   // Repeatedly allocate blocks until the block is allocated at the
2382   // right spot.
2383 
2384   // Bsd mmap allows caller to pass an address as hint; give it a try first,
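The excerpt is cut off here, but the comments describe the approach: pass requested_addr to mmap as a hint, then check whether the kernel honored it, retrying up to max_tries times while the base[] and size[] arrays track the probe mappings used to steer later attempts. A minimal sketch of just the hint-then-verify step, under that reading (not the actual implementation; MAP_ANON vs. MAP_ANONYMOUS spelling varies by platform):

#include <sys/mman.h>
#include <cstddef>

// Try to reserve 'bytes' exactly at 'requested_addr'; the address is only
// a hint, so verify the kernel actually placed the mapping there.
static char* try_reserve_at(char* requested_addr, size_t bytes) {
  void* addr = mmap(requested_addr, bytes, PROT_NONE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
  if (addr == MAP_FAILED) {
    return nullptr;
  }
  if (addr != (void*)requested_addr) {
    // The kernel chose a different spot: release it and report failure.
    munmap(addr, bytes);
    return nullptr;
  }
  return (char*)addr;
}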

