
src/os/aix/vm/os_aix.cpp (old version)





1919 
1920   // Either give me a wish address or a wish alignment, but not both.
1921   assert0(!(requested_addr != NULL && alignment_hint != 0));
1922 
1923   // We must prevent anyone from attaching too close to the
1924   // BRK because that may cause malloc OOM.
1925   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1926     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1927       "Will attach anywhere.", requested_addr);
1928     // Act like the OS refused to attach there.
1929     requested_addr = NULL;
1930   }
1931 
1932   // For old AS/400s (V5R4 and older) we should not even be here - System V shared memory is not
1933   // really supported (max size 4GB), so reserve_mmaped_memory should have been used instead.
1934   if (os::Aix::on_pase_V5R4_or_older()) {
1935     ShouldNotReachHere();
1936   }
1937 
1938   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1939   const size_t size = align_size_up(bytes, 64*K);
1940 
1941   // Reserve the shared segment.
1942   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1943   if (shmid == -1) {
1944     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1945     return NULL;
1946   }
1947 
1948   // Important note:
1949   // It is very important that we do not leave a shm segment alive when leaving this function.
1950   // We must remove it from the system right after attaching it, because System V shm segments
1951   // are global and survive the process.
1952   // So, from here on: do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1953 
1954   struct shmid_ds shmbuf;
1955   memset(&shmbuf, 0, sizeof(shmbuf));
1956   shmbuf.shm_pagesize = 64*K;
1957   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1958     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1959                size / (64*K), errno);
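
The note at (A) above describes the required System V lifecycle: create, attach, then immediately mark the id for removal so the segment cannot outlive the process. A condensed standalone sketch of that pattern (simplified error handling, AIX-specific SHM_PAGESIZE step as in the code above; attach_transient_shm_64k is an illustrative name, not a HotSpot function):

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>
    #include <stddef.h>
    #include <string.h>

    // Sketch: reserve `size` bytes of System V shared memory with 64K pages.
    // Returns NULL on failure. The IPC_RMID call right after shmat() is the
    // crucial step: the mapping stays usable until shmdt(), but the global
    // segment id is gone, so nothing can leak past this process.
    static void* attach_transient_shm_64k(size_t size) {
      int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
      if (shmid == -1) {
        return NULL;
      }
      struct shmid_ds shmbuf;
      memset(&shmbuf, 0, sizeof(shmbuf));
      shmbuf.shm_pagesize = 64 * 1024;
      shmctl(shmid, SHM_PAGESIZE, &shmbuf);  // AIX-specific; best effort here
      void* p = shmat(shmid, NULL, 0);
      shmctl(shmid, IPC_RMID, NULL);         // (A): remove the id immediately
      return (p == (void*)-1) ? NULL : p;
    }
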


2060     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2061       "Will attach anywhere.", requested_addr);
2062     // Act like the OS refused to attach there.
2063     requested_addr = NULL;
2064   }
2065 
2066   // Specify one or the other but not both.
2067   assert0(!(requested_addr != NULL && alignment_hint > 0));
2068 
2069   // In 64K mode, we claim the global page size (os::vm_page_size())
2070   // is 64K. This is one of the few points where that illusion may
2071   // break, because mmap() will always return memory aligned to 4K. So
2072   // we must ensure we only ever return memory aligned to 64K.
2073   if (alignment_hint) {
2074     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2075   } else {
2076     alignment_hint = os::vm_page_size();
2077   }
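
Taking the least common multiple here guarantees the final alignment satisfies both the caller's hint and the advertised 64K page size at once. For the power-of-two values typical here, lcm degenerates to max, but the general form (a minimal sketch of gcd/lcm helpers; HotSpot provides its own elsewhere) is:

    #include <stddef.h>

    static size_t gcd(size_t a, size_t b) {
      while (b != 0) { size_t t = a % b; a = b; b = t; }
      return a;
    }
    static size_t lcm(size_t a, size_t b) {
      return (a / gcd(a, b)) * b;  // divide first to reduce overflow risk
    }
    // lcm(16*1024, 64*1024) == 64*1024, lcm(96*1024, 64*1024) == 192*1024:
    // either way the result is aligned for both the hint and the page size.
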
2078 
2079   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2080   const size_t size = align_size_up(bytes, os::vm_page_size());
2081 
2082   // alignment: Allocate memory large enough to include an aligned range of the right size and
2083   // cut off the leading and trailing waste pages.
2084   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2085   const size_t extra_size = size + alignment_hint;
2086 
2087   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2088   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2089   int flags = MAP_ANONYMOUS | MAP_SHARED;
2090 
2091   // MAP_FIXED is needed to enforce requested_addr - the manpage is vague about what
2092   // it means if a wish address is given but MAP_FIXED is not set.
2093   //
2094   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2095   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2096   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2097   // get clobbered.
2098   if (requested_addr != NULL) {
2099     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2100       flags |= MAP_FIXED;
2101     }
2102   }
2103 
2104   char* addr = (char*)::mmap(requested_addr, extra_size,
2105       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2106 
2107   if (addr == MAP_FAILED) {
2108     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2109     return NULL;
2110   }
2111 
2112   // Handle alignment.
2113   char* const addr_aligned = align_ptr_up(addr, alignment_hint);
2114   const size_t waste_pre = addr_aligned - addr;
2115   char* const addr_aligned_end = addr_aligned + size;
2116   const size_t waste_post = extra_size - waste_pre - size;
2117   if (waste_pre > 0) {
2118     ::munmap(addr, waste_pre);
2119   }
2120   if (waste_post > 0) {
2121     ::munmap(addr_aligned_end, waste_post);
2122   }
2123   addr = addr_aligned;
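
This is the standard over-allocate-and-trim technique: map size + alignment bytes, round the start address up, and return the leading and trailing waste to the OS. Condensed into a hypothetical standalone helper (align must be a power of two, size a multiple of the page size):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    static char* mmap_aligned(size_t size, size_t align) {
      const size_t extra = size + align;
      char* raw = (char*)::mmap(NULL, extra, PROT_READ | PROT_WRITE,
                                MAP_ANONYMOUS | MAP_SHARED, -1, 0);
      if (raw == MAP_FAILED) {
        return NULL;
      }
      char* aligned = (char*)(((uintptr_t)raw + align - 1) & ~(uintptr_t)(align - 1));
      const size_t waste_pre  = aligned - raw;             // 0 .. align-1
      const size_t waste_post = extra - waste_pre - size;  // 1 .. align
      if (waste_pre > 0)  ::munmap(raw, waste_pre);
      if (waste_post > 0) ::munmap(aligned + size, waste_post);
      return aligned;  // aligned to `align`; exactly `size` bytes stay mapped
    }
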
2124 
2125   if (addr) {
2126     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2127       addr, addr + bytes, bytes);
2128   } else {
2129     if (requested_addr != NULL) {
2130       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2131     } else {
2132       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2133     }


2319 
2320 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2321   return end;
2322 }
2323 
2324 // Reserves memory, using either mmap or a shmatted System V segment depending
2325 // on page mode and allocation size. A wish address is not supported here (see below).
2326 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2327 
2328   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2329   // thereby clobbering old mappings at that place. That is probably
2330   // not intended, never used, and almost certainly an error were it
2331   // ever used this way (to attach at a specified address without
2332   // clobbering old mappings, an alternate API exists:
2333   // os::attempt_reserve_memory_at()).
2334   // Instead of mimicking the dangerous coding of the other platforms, here I
2335   // just ignore the requested address (release) or assert (debug).
2336   assert0(requested_addr == NULL);
2337 
2338   // Always round to os::vm_page_size(), which may be larger than 4K.
2339   bytes = align_size_up(bytes, os::vm_page_size());
2340   const size_t alignment_hint0 =
2341     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2342 
2343   // In 4K mode always use mmap.
2344   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2345   if (os::vm_page_size() == 4*K) {
2346     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2347   } else {
2348     if (bytes >= Use64KPagesThreshold) {
2349       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2350     } else {
2351       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2352     }
2353   }
2354 }
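
Both branches round `bytes` up to os::vm_page_size() first. For a power-of-two page size that round-up is a single mask operation; a sketch of what align_size_up computes (align_size_up_sketch is an illustrative name, not the HotSpot helper):

    #include <stddef.h>

    static inline size_t align_size_up_sketch(size_t s, size_t a) {
      return (s + a - 1) & ~(a - 1);  // a must be a power of two
    }
    // With a 64K page size:
    //   align_size_up_sketch(1,       64*1024) == 64K (one byte costs a page)
    //   align_size_up_sketch(64*1024, 64*1024) == 64K (already aligned)
    //   align_size_up_sketch(65*1024, 64*1024) == 128K
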
2355 
2356 bool os::pd_release_memory(char* addr, size_t size) {
2357 
2358   // Releasing works differently for mmap'ed and shmat'ed memory, so look up the region type.
2359   vmembk_t* const vmi = vmembk_find(addr);
2360   guarantee0(vmi);
2361 
2362   // Always round to os::vm_page_size(), which may be larger than 4K.
2363   size = align_size_up(size, os::vm_page_size());
2364   addr = align_ptr_up(addr, os::vm_page_size());
2365 
2366   bool rc = false;
2367   bool remove_bookkeeping = false;
2368   if (vmi->type == VMEM_SHMATED) {
2369     // For shmatted memory, we do:
2370     // - If user wants to release the whole range, release the memory (shmdt).
2371     // - If user only wants to release a partial range, uncommit (disclaim) that
2372     //   range. That way, at least, we do not use memory anymore (but still consume
2373     //   page table space).
2374     vmi->assert_is_valid_subrange(addr, size);
2375     if (addr == vmi->addr && size == vmi->size) {
2376       rc = release_shmated_memory(addr, size);
2377       remove_bookkeeping = true;
2378     } else {
2379       rc = uncommit_shmated_memory(addr, size);
2380     }
2381   } else {
2382     // User may unmap partial regions, but the region has to be fully contained.
2383 #ifdef ASSERT
2384     vmi->assert_is_valid_subrange(addr, size);
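
The reason a partial range can only be uncommitted rather than released is that shmdt() always detaches the entire attachment at the given address; System V has no partial unmap. A minimal illustration of the full-release half of the branch above (release_shm_attachment is a hypothetical name):

    #include <sys/shm.h>

    // shmdt() takes no size argument: it detaches the whole segment
    // attached at addr. Hence partial releases in the code above fall
    // back to uncommitting pages instead.
    static bool release_shm_attachment(char* addr) {
      return ::shmdt(addr) == 0;
    }
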


2510 size_t os::large_page_size() {
2511   return _large_page_size;
2512 }
2513 
2514 bool os::can_commit_large_page_memory() {
2515   // Does not matter, we do not support huge pages.
2516   return false;
2517 }
2518 
2519 bool os::can_execute_large_page_memory() {
2520   // Does not matter, we do not support huge pages.
2521   return false;
2522 }
2523 
2524 // Reserve memory at an arbitrary address, only if that area is
2525 // available (and not reserved for something else).
2526 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2528 
2529   // Always round to os::vm_page_size(), which may be larger than 4K.
2530   bytes = align_size_up(bytes, os::vm_page_size());
2531 
2532   // In 4K mode always use mmap.
2533   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2534   if (os::vm_page_size() == 4*K) {
2535     return reserve_mmaped_memory(bytes, requested_addr, 0);
2536   } else {
2537     if (bytes >= Use64KPagesThreshold) {
2538       return reserve_shmated_memory(bytes, requested_addr, 0);
2539     } else {
2540       return reserve_mmaped_memory(bytes, requested_addr, 0);
2541     }
2542   }
2545 }
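
How can a wish address be honored "only if that area is available"? Without MAP_FIXED (the non-SPEC1170 AIX behaviour discussed earlier), the kernel treats the address as a hint and places the mapping elsewhere if the spot is taken, so the caller can probe and verify. A generic POSIX sketch of that pattern (not the AIX-specific code path):

    #include <sys/mman.h>
    #include <stddef.h>

    // Try to reserve `size` bytes exactly at `wish` without clobbering
    // existing mappings; returns NULL if the kernel chose another spot.
    static char* try_reserve_at(char* wish, size_t size) {
      char* p = (char*)::mmap(wish, size, PROT_NONE,
                              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p == MAP_FAILED) {
        return NULL;
      }
      if (p != wish) {       // address was busy: hand the mapping back
        ::munmap(p, size);
        return NULL;
      }
      return p;
    }
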
2546 
2547 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2548   return ::read(fd, buf, nBytes);
2549 }
2550 


4295 // Get the current stack base from the OS (actually, the pthread library).
4296 // Note: usually not page aligned.
4297 address os::current_stack_base() {
4298   AixMisc::stackbounds_t bounds;
4299   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4300   guarantee(rc, "Unable to retrieve stack bounds.");
4301   return bounds.base;
4302 }
4303 
4304 // Get the current stack size from the OS (actually, the pthread library).
4305 // Returned size is such that (base - size) is always aligned to page size.
4306 size_t os::current_stack_size() {
4307   AixMisc::stackbounds_t bounds;
4308   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4309   guarantee(rc, "Unable to retrieve stack bounds.");
4310   // Align the returned stack size such that the stack low address
4311   // is aligned to page size (note: base usually is not, and we do not care).
4312   // We need to do this because caller code will assume stack low address is
4313   // page aligned and will place guard pages without checking.
4314   address low = bounds.base - bounds.size;
4315   address low_aligned = (address)align_ptr_up(low, os::vm_page_size());
4316   size_t s = bounds.base - low_aligned;
4317   return s;
4318 }
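
A worked example of the adjustment: with a 4K page, a reported base of 0x3000F00 and raw size of 0x20000 give a low edge of 0x2FE0F00, which is not page aligned, so the size is trimmed until it is (illustrative numbers; page_aligned_stack_size is a hypothetical helper mirroring the logic above):

    #include <stdint.h>
    #include <stddef.h>

    // Shrink the reported stack size so that (base - size) lands on a
    // page boundary even though base itself usually does not.
    static size_t page_aligned_stack_size(uintptr_t base, size_t raw_size,
                                          uintptr_t page) {
      uintptr_t low         = base - raw_size;
      uintptr_t low_aligned = (low + page - 1) & ~(page - 1);
      return base - low_aligned;  // <= raw_size; low edge now aligned
    }
    // page_aligned_stack_size(0x3000F00, 0x20000, 0x1000) == 0x1FF00,
    // and 0x3000F00 - 0x1FF00 == 0x2FE1000, a 4K boundary.
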
4319 
4320 extern char** environ;
4321 
4322 // Run the specified command in a separate process. Return its exit value,
4323 // or -1 on failure (e.g. can't fork a new process).
4324 // Unlike system(), this function can be called from a signal handler. It
4325 // doesn't block SIGINT et al.
4326 int os::fork_and_exec(char* cmd) {
4327   char * argv[4] = {"sh", "-c", cmd, NULL};
4328 
4329   pid_t pid = fork();
4330 
4331   if (pid < 0) {
4332     // fork failed
4333     return -1;
4334 
4335   } else if (pid == 0) {
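
The excerpt is cut off here. A hedged sketch of how such a fork-and-exec helper conventionally continues - the child execs the shell, the parent waits - follows; this is the generic pattern, not necessarily the verbatim HotSpot code:

    #include <unistd.h>
    #include <sys/wait.h>

    extern char** environ;

    static int fork_and_exec_sketch(char* cmd) {
      char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL };
      pid_t pid = fork();
      if (pid < 0) {
        return -1;                          // fork failed
      } else if (pid == 0) {
        execve("/bin/sh", argv, environ);   // child: run the command
        _exit(127);                         // only reached if exec failed
      } else {
        int status;
        if (waitpid(pid, &status, 0) != pid) {
          return -1;
        }
        return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
      }
    }
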




src/os/aix/vm/os_aix.cpp (new version)

1919 
1920   // Either give me a wish address or a wish alignment, but not both.
1921   assert0(!(requested_addr != NULL && alignment_hint != 0));
1922 
1923   // We must prevent anyone from attaching too close to the
1924   // BRK because that may cause malloc OOM.
1925   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1926     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1927       "Will attach anywhere.", requested_addr);
1928     // Act like the OS refused to attach there.
1929     requested_addr = NULL;
1930   }
1931 
1932   // For old AS/400s (V5R4 and older) we should not even be here - System V shared memory is not
1933   // really supported (max size 4GB), so reserve_mmaped_memory should have been used instead.
1934   if (os::Aix::on_pase_V5R4_or_older()) {
1935     ShouldNotReachHere();
1936   }
1937 
1938   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1939   const size_t size = align_up(bytes, 64*K);
1940 
1941   // Reserve the shared segment.
1942   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1943   if (shmid == -1) {
1944     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1945     return NULL;
1946   }
1947 
1948   // Important note:
1949   // It is very important that we do not leave a shm segment alive when leaving this function.
1950   // We must remove it from the system right after attaching it, because System V shm segments
1951   // are global and survive the process.
1952   // So, from here on: do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1953 
1954   struct shmid_ds shmbuf;
1955   memset(&shmbuf, 0, sizeof(shmbuf));
1956   shmbuf.shm_pagesize = 64*K;
1957   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1958     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1959                size / (64*K), errno);


2060     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2061       "Will attach anywhere.", requested_addr);
2062     // Act like the OS refused to attach there.
2063     requested_addr = NULL;
2064   }
2065 
2066   // Specify one or the other but not both.
2067   assert0(!(requested_addr != NULL && alignment_hint > 0));
2068 
2069   // In 64K mode, we claim the global page size (os::vm_page_size())
2070   // is 64K. This is one of the few points where that illusion may
2071   // break, because mmap() will always return memory aligned to 4K. So
2072   // we must ensure we only ever return memory aligned to 64K.
2073   if (alignment_hint) {
2074     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2075   } else {
2076     alignment_hint = os::vm_page_size();
2077   }
2078 
2079   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2080   const size_t size = align_up(bytes, os::vm_page_size());
2081 
2082   // alignment: Allocate memory large enough to include an aligned range of the right size and
2083   // cut off the leading and trailing waste pages.
2084   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2085   const size_t extra_size = size + alignment_hint;
2086 
2087   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2088   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2089   int flags = MAP_ANONYMOUS | MAP_SHARED;
2090 
2091   // MAP_FIXED is needed to enforce requested_addr - the manpage is vague about what
2092   // it means if a wish address is given but MAP_FIXED is not set.
2093   //
2094   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2095   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2096   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2097   // get clobbered.
2098   if (requested_addr != NULL) {
2099     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2100       flags |= MAP_FIXED;
2101     }
2102   }
2103 
2104   char* addr = (char*)::mmap(requested_addr, extra_size,
2105       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2106 
2107   if (addr == MAP_FAILED) {
2108     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2109     return NULL;
2110   }
2111 
2112   // Handle alignment.
2113   char* const addr_aligned = align_up(addr, alignment_hint);
2114   const size_t waste_pre = addr_aligned - addr;
2115   char* const addr_aligned_end = addr_aligned + size;
2116   const size_t waste_post = extra_size - waste_pre - size;
2117   if (waste_pre > 0) {
2118     ::munmap(addr, waste_pre);
2119   }
2120   if (waste_post > 0) {
2121     ::munmap(addr_aligned_end, waste_post);
2122   }
2123   addr = addr_aligned;
2124 
2125   if (addr) {
2126     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2127       addr, addr + bytes, bytes);
2128   } else {
2129     if (requested_addr != NULL) {
2130       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2131     } else {
2132       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2133     }


2319 
2320 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2321   return end;
2322 }
2323 
2324 // Reserves memory, using either mmap or a shmatted System V segment depending
2325 // on page mode and allocation size. A wish address is not supported here (see below).
2326 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2327 
2328   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2329   // thereby clobbering old mappings at that place. That is probably
2330   // not intended, never used, and almost certainly an error were it
2331   // ever used this way (to attach at a specified address without
2332   // clobbering old mappings, an alternate API exists:
2333   // os::attempt_reserve_memory_at()).
2334   // Instead of mimicking the dangerous coding of the other platforms, here I
2335   // just ignore the requested address (release) or assert (debug).
2336   assert0(requested_addr == NULL);
2337 
2338   // Always round to os::vm_page_size(), which may be larger than 4K.
2339   bytes = align_up(bytes, os::vm_page_size());
2340   const size_t alignment_hint0 =
2341     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2342 
2343   // In 4K mode always use mmap.
2344   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2345   if (os::vm_page_size() == 4*K) {
2346     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2347   } else {
2348     if (bytes >= Use64KPagesThreshold) {
2349       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2350     } else {
2351       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2352     }
2353   }
2354 }
2355 
2356 bool os::pd_release_memory(char* addr, size_t size) {
2357 
2358   // Releasing works differently for mmap'ed and shmat'ed memory, so look up the region type.
2359   vmembk_t* const vmi = vmembk_find(addr);
2360   guarantee0(vmi);
2361 
2362   // Always round to os::vm_page_size(), which may be larger than 4K.
2363   size = align_up(size, os::vm_page_size());
2364   addr = align_up(addr, os::vm_page_size());
2365 
2366   bool rc = false;
2367   bool remove_bookkeeping = false;
2368   if (vmi->type == VMEM_SHMATED) {
2369     // For shmatted memory, we do:
2370     // - If user wants to release the whole range, release the memory (shmdt).
2371     // - If user only wants to release a partial range, uncommit (disclaim) that
2372     //   range. That way, at least, we do not use memory anymore (but still consume
2373     //   page table space).
2374     vmi->assert_is_valid_subrange(addr, size);
2375     if (addr == vmi->addr && size == vmi->size) {
2376       rc = release_shmated_memory(addr, size);
2377       remove_bookkeeping = true;
2378     } else {
2379       rc = uncommit_shmated_memory(addr, size);
2380     }
2381   } else {
2382     // User may unmap partial regions, but the region has to be fully contained.
2383 #ifdef ASSERT
2384     vmi->assert_is_valid_subrange(addr, size);


2510 size_t os::large_page_size() {
2511   return _large_page_size;
2512 }
2513 
2514 bool os::can_commit_large_page_memory() {
2515   // Does not matter, we do not support huge pages.
2516   return false;
2517 }
2518 
2519 bool os::can_execute_large_page_memory() {
2520   // Does not matter, we do not support huge pages.
2521   return false;
2522 }
2523 
2524 // Reserve memory at an arbitrary address, only if that area is
2525 // available (and not reserved for something else).
2526 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2528 
2529   // Always round to os::vm_page_size(), which may be larger than 4K.
2530   bytes = align_up(bytes, os::vm_page_size());
2531 
2532   // In 4K mode always use mmap.
2533   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2534   if (os::vm_page_size() == 4*K) {
2535     return reserve_mmaped_memory(bytes, requested_addr, 0);
2536   } else {
2537     if (bytes >= Use64KPagesThreshold) {
2538       return reserve_shmated_memory(bytes, requested_addr, 0);
2539     } else {
2540       return reserve_mmaped_memory(bytes, requested_addr, 0);
2541     }
2542   }
2545 }
2546 
2547 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2548   return ::read(fd, buf, nBytes);
2549 }
2550 


4295 // Get the current stack base from the OS (actually, the pthread library).
4296 // Note: usually not page aligned.
4297 address os::current_stack_base() {
4298   AixMisc::stackbounds_t bounds;
4299   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4300   guarantee(rc, "Unable to retrieve stack bounds.");
4301   return bounds.base;
4302 }
4303 
4304 // Get the current stack size from the OS (actually, the pthread library).
4305 // Returned size is such that (base - size) is always aligned to page size.
4306 size_t os::current_stack_size() {
4307   AixMisc::stackbounds_t bounds;
4308   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4309   guarantee(rc, "Unable to retrieve stack bounds.");
4310   // Align the returned stack size such that the stack low address
4311   // is aligned to page size (note: base usually is not, and we do not care).
4312   // We need to do this because caller code will assume stack low address is
4313   // page aligned and will place guard pages without checking.
4314   address low = bounds.base - bounds.size;
4315   address low_aligned = (address)align_up(low, os::vm_page_size());
4316   size_t s = bounds.base - low_aligned;
4317   return s;
4318 }
4319 
4320 extern char** environ;
4321 
4322 // Run the specified command in a separate process. Return its exit value,
4323 // or -1 on failure (e.g. can't fork a new process).
4324 // Unlike system(), this function can be called from a signal handler. It
4325 // doesn't block SIGINT et al.
4326 int os::fork_and_exec(char* cmd) {
4327   char * argv[4] = {"sh", "-c", cmd, NULL};
4328 
4329   pid_t pid = fork();
4330 
4331   if (pid < 0) {
4332     // fork failed
4333     return -1;
4334 
4335   } else if (pid == 0) {

