< prev index next >

src/hotspot/os/aix/os_aix.cpp

Print this page




// Release memory that was obtained via os::reserve_memory_special().
// Never expected to be reached on AIX: huge pages are not supported in this
// port (see can_commit_large_page_memory() below), so Unimplemented() flags
// any call as a coding error.
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false; // Not reached; satisfies the compiler's return-path check.
}
2479 
2480 size_t os::large_page_size() {
2481   return _large_page_size;
2482 }
2483 
// Whether committing reserved memory can be backed by large pages.
// Always false: this port does not support huge pages.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2488 
// Whether code may be executed from large-page-backed memory.
// Always false: this port does not support huge pages.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2493 
















2494 // Reserve memory at an arbitrary address, only if that area is
2495 // available (and not reserved for something else).
2496 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2497   char* addr = NULL;
2498 
2499   // Always round to os::vm_page_size(), which may be larger than 4K.
2500   bytes = align_up(bytes, os::vm_page_size());
2501 
2502   // In 4K mode always use mmap.
2503   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2504   if (os::vm_page_size() == 4*K) {
2505     return reserve_mmaped_memory(bytes, requested_addr, 0);
2506   } else {
2507     if (bytes >= Use64KPagesThreshold) {
2508       return reserve_shmated_memory(bytes, requested_addr, 0);
2509     } else {
2510       return reserve_mmaped_memory(bytes, requested_addr, 0);
2511     }
2512   }
2513 




// Release memory that was obtained via os::reserve_memory_special().
// Never expected to be reached on AIX: huge pages are not supported in this
// port (see can_commit_large_page_memory() below), so Unimplemented() flags
// any call as a coding error.
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false; // Not reached; satisfies the compiler's return-path check.
}
2479 
2480 size_t os::large_page_size() {
2481   return _large_page_size;
2482 }
2483 
// Whether committing reserved memory can be backed by large pages.
// Always false: this port does not support huge pages.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2488 
// Whether code may be executed from large-page-backed memory.
// Always false: this port does not support huge pages.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2493 
2494 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2495   assert(file_desc >= 0, "file_desc is not valid");
2496   char* result = NULL;
2497 
2498   // Always round to os::vm_page_size(), which may be larger than 4K.
2499   bytes = align_up(bytes, os::vm_page_size());
2500   result = reserve_mmaped_memory(bytes, requested_addr, 0);
2501   
2502   if (result != NULL) {
2503     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
2504       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2505     }
2506   }
2507   return result;
2508 }
2509 
2510 // Reserve memory at an arbitrary address, only if that area is
2511 // available (and not reserved for something else).
2512 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2513   char* addr = NULL;
2514 
2515   // Always round to os::vm_page_size(), which may be larger than 4K.
2516   bytes = align_up(bytes, os::vm_page_size());
2517 
2518   // In 4K mode always use mmap.
2519   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2520   if (os::vm_page_size() == 4*K) {
2521     return reserve_mmaped_memory(bytes, requested_addr, 0);
2522   } else {
2523     if (bytes >= Use64KPagesThreshold) {
2524       return reserve_shmated_memory(bytes, requested_addr, 0);
2525     } else {
2526       return reserve_mmaped_memory(bytes, requested_addr, 0);
2527     }
2528   }
2529 


< prev index next >