src/hotspot/os/aix/os_aix.cpp

Old version (before the change):

2252     // AIX commits memory on touch. So, touch all pages to be committed.
2253     for (char* p = addr; p < (addr + size); p += 4*K) {
2254       *p = '\0';
2255     }
2256   }
2257 
2258   return true;
2259 }
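
Aside: the touch loop above works because AIX, unlike most Unices, does not back a committed range with real memory until it is first written. A minimal standalone sketch of the same reserve-then-touch technique (plain POSIX; the function name and the 4K step are illustrative assumptions, not code from this file):

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve a range, then force it to be backed by touching one byte
    // in every smallest (4K) page; the first store commits the page.
    static char* reserve_and_touch(size_t size) {
      char* p = (char*) ::mmap(NULL, size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == (char*) MAP_FAILED) return NULL;
      for (char* q = p; q < p + size; q += 4 * 1024) {
        *q = '\0';
      }
      return p;
    }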
2260 
2261 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2262   return pd_commit_memory(addr, size, exec);
2263 }
2264 
2265 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2266                                   size_t alignment_hint, bool exec,
2267                                   const char* mesg) {
2268   // The alignment_hint is ignored on this OS.
2269   pd_commit_memory_or_exit(addr, size, exec, mesg);
2270 }
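
The three-argument-plus-message overload this forwards to is outside the excerpt. Its likely shape, shown here only as a hedged sketch (vm_exit_out_of_memory() and OOM_MMAP_ERROR are existing HotSpot names, but this body is an assumption, not the file's code):

    // Sketch: commit, and terminate the VM with the supplied message
    // if the commit fails.
    void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                      const char* mesg) {
      assert(mesg != NULL, "mesg must be specified");
      if (!pd_commit_memory(addr, size, exec)) {
        vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
      }
    }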
2271 
2272 bool os::pd_uncommit_memory(char* addr, size_t size) {
2273   assert(is_aligned_to(addr, os::vm_page_size()),
2274     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2275     p2i(addr), os::vm_page_size());
2276   assert(is_aligned_to(size, os::vm_page_size()),
2277     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2278     size, os::vm_page_size());
2279 
2280   // Dispatch dynamically: mmap'ed and shmat'ed ranges are uncommitted differently.
2281   const vmembk_t* const vmi = vmembk_find(addr);
2282   guarantee0(vmi);
2283   vmi->assert_is_valid_subrange(addr, size);
2284 
2285   if (vmi->type == VMEM_SHMATED) {
2286     return uncommit_shmated_memory(addr, size);
2287   } else {
2288     return uncommit_mmaped_memory(addr, size);
2289   }
2290 }
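
uncommit_shmated_memory() and uncommit_mmaped_memory() are not part of this excerpt. A rough sketch of the usual AIX techniques they stand for (the helper bodies are assumptions; disclaim() and DISCLAIM_ZEROMEM are real AIX interfaces):

    #include <sys/mman.h>
    #include <sys/shm.h>   // disclaim(), DISCLAIM_ZEROMEM

    // shmat'ed memory cannot be partially detached, but its backing
    // pages can be released with disclaim(). Note that disclaim()
    // takes an unsigned int length, so very large ranges would need
    // to be disclaimed in chunks.
    static bool uncommit_shmated_memory(char* addr, size_t size) {
      return ::disclaim(addr, (unsigned int) size, DISCLAIM_ZEROMEM) == 0;
    }

    // mmap'ed memory: map an inaccessible anonymous mapping over the
    // range (MAP_FIXED), releasing the old backing pages.
    static bool uncommit_mmaped_memory(char* addr, size_t size) {
      return ::mmap(addr, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
    }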
2291 
2292 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {


2330     ids[0] = 0;
2331     return 1;
2332   }
2333   return 0;
2334 }
2335 
2336 int os::numa_get_group_id_for_address(const void* address) {
2337   return 0;
2338 }
2339 
2340 bool os::get_page_info(char *start, page_info* info) {
2341   return false;
2342 }
2343 
2344 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2345   return end;
2346 }
2347 
2348 // Reserves memory, either mmap'ed or as a shmat'ed shared memory segment.
2349 // Will assert (in debug builds) if a wish address is given; see below.
2350 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2351 
2352   // All other Unices do a mmap(MAP_FIXED) if an addr is given, thereby
2353   // clobbering any old mappings at that place. That is probably not
2354   // intended, is never used, and would almost certainly be an error were
2355   // it ever used this way (to attach at a specified address without
2356   // clobbering old mappings, an alternate API exists:
2357   // os::attempt_reserve_memory_at()).
2358   // Instead of mimicking that dangerous coding, the requested address is
2359   // simply ignored here (release builds) or asserted on (debug builds).
2360   assert0(requested_addr == NULL);
2361 
2362   // Always round to os::vm_page_size(), which may be larger than 4K.
2363   bytes = align_up(bytes, os::vm_page_size());
2364   const size_t alignment_hint0 =
2365     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2366 
2367   // In 4K mode always use mmap.
2368   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2369   if (os::vm_page_size() == 4*K) {
2370     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
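
To illustrate the comment's point: the safe alternative to mmap(MAP_FIXED) is to pass the wish address merely as a hint and verify the result. A minimal sketch, independent of HotSpot (the function name is made up):

    #include <stddef.h>
    #include <sys/mman.h>

    // Request a specific address WITHOUT MAP_FIXED: the kernel treats
    // it as a hint and returns a different address if the range is in
    // use. Check the result and back out instead of clobbering.
    static char* try_reserve_at(char* wish, size_t size) {
      char* p = (char*) ::mmap(wish, size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == (char*) MAP_FAILED) return NULL;
      if (p != wish) {
        ::munmap(p, size);   // got somewhere else: undo, report failure
        return NULL;
      }
      return p;
    }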




New version (after the change: os::pd_uncommit_memory() gains a bool exec parameter, and os::pd_reserve_memory() gains a bool executable parameter):

2252     // AIX commits memory on touch. So, touch all pages to be committed.
2253     for (char* p = addr; p < (addr + size); p += 4*K) {
2254       *p = '\0';
2255     }
2256   }
2257 
2258   return true;
2259 }
2260 
2261 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2262   return pd_commit_memory(addr, size, exec);
2263 }
2264 
2265 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2266                                   size_t alignment_hint, bool exec,
2267                                   const char* mesg) {
2268   // The alignment_hint is ignored on this OS.
2269   pd_commit_memory_or_exit(addr, size, exec, mesg);
2270 }
2271 
2272 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
2273   assert(is_aligned_to(addr, os::vm_page_size()),
2274     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2275     p2i(addr), os::vm_page_size());
2276   assert(is_aligned_to(size, os::vm_page_size()),
2277     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2278     size, os::vm_page_size());
2279 
2280   // Dispatch dynamically: mmap'ed and shmat'ed ranges are uncommitted differently.
2281   const vmembk_t* const vmi = vmembk_find(addr);
2282   guarantee0(vmi);
2283   vmi->assert_is_valid_subrange(addr, size);
2284 
2285   if (vmi->type == VMEM_SHMATED) {
2286     return uncommit_shmated_memory(addr, size);
2287   } else {
2288     return uncommit_mmaped_memory(addr, size);
2289   }
2290 }
2291 
2292 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {


2330     ids[0] = 0;
2331     return 1;
2332   }
2333   return 0;
2334 }
2335 
2336 int os::numa_get_group_id_for_address(const void* address) {
2337   return 0;
2338 }
2339 
2340 bool os::get_page_info(char *start, page_info* info) {
2341   return false;
2342 }
2343 
2344 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2345   return end;
2346 }
2347 
2348 // Reserves memory, either mmap'ed or as a shmat'ed shared memory segment.
2349 // Will assert (in debug builds) if a wish address is given; see below.
2350 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint, bool executable) {
2351 
2352   // All other Unices do a mmap(MAP_FIXED) if an addr is given, thereby
2353   // clobbering any old mappings at that place. That is probably not
2354   // intended, is never used, and would almost certainly be an error were
2355   // it ever used this way (to attach at a specified address without
2356   // clobbering old mappings, an alternate API exists:
2357   // os::attempt_reserve_memory_at()).
2358   // Instead of mimicking that dangerous coding, the requested address is
2359   // simply ignored here (release builds) or asserted on (debug builds).
2360   assert0(requested_addr == NULL);
2361 
2362   // Always round to os::vm_page_size(), which may be larger than 4K.
2363   bytes = align_up(bytes, os::vm_page_size());
2364   const size_t alignment_hint0 =
2365     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2366 
2367   // In 4K mode always use mmap.
2368   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2369   if (os::vm_page_size() == 4*K) {
2370     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
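
The rounding at lines 2362-2365 is plain power-of-two alignment arithmetic. As a self-contained sketch of what align_up() computes (the helper name here is illustrative):

    #include <stddef.h>

    // Round value up to the next multiple of a power-of-two alignment.
    static size_t align_up_sz(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // E.g. with 64K pages: align_up_sz(100*1024 + 1, 64*1024) == 128*1024.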

