< prev index next >

src/os/aix/vm/os_aix.cpp

Print this page




2494   Unimplemented();
2495   return false;
2496 }
2497 
// Returns the cached large page size.
// NOTE(review): _large_page_size is presumably initialized during VM/os
// startup — initialization site is not visible in this chunk; confirm.
size_t os::large_page_size() {
  return _large_page_size;
}
2501 
// Whether large pages can be committed incrementally.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2506 
// Whether code can be executed from large-page-backed memory.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2511 
2512 // Reserve memory at an arbitrary address, only if that area is
2513 // available (and not reserved for something else).
2514 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2515   char* addr = NULL;
2516 
2517   // Always round to os::vm_page_size(), which may be larger than 4K.
2518   bytes = align_size_up(bytes, os::vm_page_size());
2519 
2520   // In 4K mode always use mmap.
2521   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2522   if (os::vm_page_size() == 4*K) {
2523     return reserve_mmaped_memory(bytes, requested_addr, 0);
2524   } else {
2525     if (bytes >= Use64KPagesThreshold) {
2526       return reserve_shmated_memory(bytes, requested_addr, 0);
2527     } else {
2528       return reserve_mmaped_memory(bytes, requested_addr, 0);
2529     }
2530   }
2531 
2532   return addr;
2533 }
2534 
// Thin wrapper around POSIX read(2).
// NOTE(review): ::read returns ssize_t; an error return of -1 converts to
// a huge size_t here, so callers must test against (size_t)-1 rather than
// "< 0" — confirm call sites.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2538 
// Positioned read: wrapper around POSIX pread(2); does not move the file
// offset of fd.
// NOTE(review): as with os::read, pread's -1 error return converts to a
// huge size_t — confirm call sites handle this.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2542 
2543 void os::naked_short_sleep(jlong ms) {
2544   struct timespec req;
2545 




2494   Unimplemented();
2495   return false;
2496 }
2497 
// Returns the cached large page size.
// NOTE(review): _large_page_size is presumably initialized during VM/os
// startup — initialization site is not visible in this chunk; confirm.
size_t os::large_page_size() {
  return _large_page_size;
}
2501 
// Whether large pages can be committed incrementally.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2506 
// Whether code can be executed from large-page-backed memory.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2511 
2512 // Reserve memory at an arbitrary address, only if that area is
2513 // available (and not reserved for something else).
2514 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, bool use_SHM) {
2515   char* addr = NULL;
2516 
2517   // Always round to os::vm_page_size(), which may be larger than 4K.
2518   bytes = align_size_up(bytes, os::vm_page_size());
2519 
2520   // In 4K mode always use mmap.
2521   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2522   if (os::vm_page_size() == 4*K) {
2523     return reserve_mmaped_memory(bytes, requested_addr, 0);
2524   } else {
2525     if (use_SHM && bytes >= Use64KPagesThreshold) {
2526       return reserve_shmated_memory(bytes, requested_addr, 0);
2527     } else {
2528       return reserve_mmaped_memory(bytes, requested_addr, 0);
2529     }
2530   }
2531 
2532   return addr;
2533 }
2534 
// Thin wrapper around POSIX read(2).
// NOTE(review): ::read returns ssize_t; an error return of -1 converts to
// a huge size_t here, so callers must test against (size_t)-1 rather than
// "< 0" — confirm call sites.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2538 
// Positioned read: wrapper around POSIX pread(2); does not move the file
// offset of fd.
// NOTE(review): as with os::read, pread's -1 error return converts to a
// huge size_t — confirm call sites handle this.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2542 
2543 void os::naked_short_sleep(jlong ms) {
2544   struct timespec req;
2545 


< prev index next >