src/os/bsd/vm/os_bsd.cpp
8015252 Sdiff: the patch deletes highest_vm_reserved_address() and the Linux-specific set_coredump_filter(); the pre-patch source is shown below.

 609   OSThread* osthread = thread->osthread();
 610   osthread->set_caller_sigmask(caller_sigmask);
 611 
 612   pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);
 613 
 614   if (!ReduceSignalUsage) {
 615     if (thread->is_VM_thread()) {
 616       // Only the VM thread handles BREAK_SIGNAL ...
 617       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 618     } else {
 619       // ... all other threads block BREAK_SIGNAL
 620       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 621     }
 622   }
 623 }
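
For context, a minimal sketch of the blocking half above, assuming BREAK_SIGNAL is SIGQUIT (its usual definition in HotSpot's POSIX code):

#include <pthread.h>
#include <signal.h>

// Sketch: block BREAK_SIGNAL (assumed SIGQUIT) in the calling thread,
// as the SIG_BLOCK branch above does via vm_signals().
static void block_break_signal() {
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGQUIT);               // assumption: BREAK_SIGNAL == SIGQUIT
  pthread_sigmask(SIG_BLOCK, &set, NULL);
}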
 624 
 625 
 626 //////////////////////////////////////////////////////////////////////////////
 627 // create new thread
 628 
 629 static address highest_vm_reserved_address();
 630 
 631 // check if it's safe to start a new thread
 632 static bool _thread_safety_check(Thread* thread) {
 633   return true;
 634 }
 635 
 636 #ifdef __APPLE__
 637 // library handle for calling objc_registerThreadWithCollector()
 638 // without static linking to the libobjc library
 639 #define OBJC_LIB "/usr/lib/libobjc.dylib"
 640 #define OBJC_GCREGISTER "objc_registerThreadWithCollector"
 641 typedef void (*objc_registerThreadWithCollector_t)();
 642 extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction;
 643 objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
 644 #endif
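
The excerpt declares the handle but not the lookup; a plausible sketch of how the pointer gets resolved at runtime with dlopen/dlsym (resolve_objc_register is a hypothetical name, not this file's code):

#include <dlfcn.h>

// Hypothetical resolution of the function pointer declared above, using the
// OBJC_LIB and OBJC_GCREGISTER macros from the excerpt.
static void resolve_objc_register() {
  void* handle = dlopen(OBJC_LIB, RTLD_LAZY);
  if (handle != NULL) {
    objc_registerThreadWithCollectorFunction =
        (objc_registerThreadWithCollector_t) dlsym(handle, OBJC_GCREGISTER);
  }
}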
 645 
 646 #ifdef __APPLE__
 647 static uint64_t locate_unique_thread_id() {
 648   // Additional thread_id used to correlate threads in SA
 649   thread_identifier_info_data_t     m_ident_info;
 650   mach_msg_type_number_t            count = THREAD_IDENTIFIER_INFO_COUNT;
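
The function is truncated here; it presumably completes with a thread_info() call along these lines (a sketch under that assumption, not the file's actual code):

#include <mach/mach.h>

// Sketch: ask Mach for the current thread's stable identifier via the
// THREAD_IDENTIFIER_INFO flavor, returning 0 on failure.
static uint64_t unique_thread_id_sketch() {
  thread_identifier_info_data_t info;
  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  mach_port_t port = mach_thread_self();
  kern_return_t kr = thread_info(port, THREAD_IDENTIFIER_INFO,
                                 (thread_info_t) &info, &count);
  mach_port_deallocate(mach_task_self(), port);  // drop the extra port ref
  return (kr == KERN_SUCCESS) ? info.thread_id : 0;
}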


2095   return addr == MAP_FAILED ? NULL : addr;
2096 }
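
Only anon_mmap's tail is shown above; a plausible shape for the whole function, assuming an anonymous private PROT_NONE reservation pinned with MAP_FIXED when a specific address is requested (anon_mmap_sketch is illustrative, not the file's code):

#include <sys/mman.h>

// Sketch of an anonymous reservation: private, uncommitted (PROT_NONE),
// pinned to requested_addr only when 'fixed' is set.
static char* anon_mmap_sketch(char* requested_addr, size_t bytes, bool fixed) {
  int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
  flags |= MAP_NORESERVE;   // assumption: skip swap reservation where supported
#endif
  if (fixed) {
    flags |= MAP_FIXED;
  }
  char* addr = (char*) ::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
  return addr == MAP_FAILED ? NULL : addr;
}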
2097 
2098 // Don't update _highest_vm_reserved_address, because there might be memory
2099 // regions above addr + size. If so, releasing a memory region only creates
2100 // a hole in the address space, it doesn't help prevent heap-stack collision.
2101 //
2102 static int anon_munmap(char * addr, size_t size) {
2103   return ::munmap(addr, size) == 0;
2104 }
2105 
2106 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2107                          size_t alignment_hint) {
2108   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2109 }
2110 
2111 bool os::pd_release_memory(char* addr, size_t size) {
2112   return anon_munmap(addr, size);
2113 }
2114 
2115 static address highest_vm_reserved_address() {
2116   return _highest_vm_reserved_address;
2117 }
2118 
2119 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2120   // Bsd wants the mprotect address argument to be page aligned.
2121   char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
2122 
2123   // According to SUSv3, mprotect() should only be used with mappings
2124   // established by mmap(), and mmap() always maps whole pages. Unaligned
2125 // 'addr' likely indicates a problem in the VM (e.g. trying to change
2126   // protection of malloc'ed or statically allocated memory). Check the
2127   // caller if you hit this assert.
2128   assert(addr == bottom, "sanity check");
2129 
2130   size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2131   return ::mprotect(bottom, size, prot) == 0;
2132 }
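
align_size_down and align_size_up are HotSpot helpers; for a power-of-two page size they reduce to the usual mask arithmetic, sketched here:

#include <stdint.h>
#include <stddef.h>

// Round an address down, and a size up, to a power-of-two 'page' boundary.
static inline uintptr_t align_down(uintptr_t x, size_t page) {
  return x & ~((uintptr_t) page - 1);
}
static inline size_t align_up(size_t x, size_t page) {
  return (x + page - 1) & ~((size_t) page - 1);
}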
2133 
2134 // Set protections specified
2135 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2136                         bool is_committed) {
2137   unsigned int p = 0;
2138   switch (prot) {


2142   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2143   default:
2144     ShouldNotReachHere();
2145   }
2146   // is_committed is unused.
2147   return bsd_mprotect(addr, bytes, p);
2148 }
2149 
2150 bool os::guard_memory(char* addr, size_t size) {
2151   return bsd_mprotect(addr, size, PROT_NONE);
2152 }
2153 
2154 bool os::unguard_memory(char* addr, size_t size) {
2155   return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
2156 }
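
A usage sketch for the guard primitives above: reserve a region and turn its first page into a PROT_NONE tripwire (reserve_with_guard is illustrative; error handling trimmed):

#include <sys/mman.h>
#include <unistd.h>

// Reserve bytes plus one guard page; any access to the guard page faults.
static char* reserve_with_guard(size_t bytes) {
  size_t page = (size_t) sysconf(_SC_PAGESIZE);
  char* base = (char*) ::mmap(NULL, bytes + page, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANON, -1, 0);
  if (base == MAP_FAILED) return NULL;
  ::mprotect(base, page, PROT_NONE);   // the guard, as os::guard_memory() would
  return base + page;                  // usable memory starts past the guard
}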
2157 
2158 bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
2159   return false;
2160 }
2161 
2162 /*
2163 * Set the coredump_filter bits to include largepages in core dump (bit 6)
2164 *
2165 * From the coredump_filter documentation:
2166 *
2167 * - (bit 0) anonymous private memory
2168 * - (bit 1) anonymous shared memory
2169 * - (bit 2) file-backed private memory
2170 * - (bit 3) file-backed shared memory
2171 * - (bit 4) ELF header pages in file-backed private memory areas (it is
2172 *           effective only if bit 2 is cleared)
2173 * - (bit 5) hugetlb private memory
2174 * - (bit 6) hugetlb shared memory
2175 */
2176 static void set_coredump_filter(void) {
2177   FILE *f;
2178   long cdm;
2179 
2180   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
2181     return;
2182   }
2183 
2184   if (fscanf(f, "%lx", &cdm) != 1) {
2185     fclose(f);
2186     return;
2187   }
2188 
2189   rewind(f);
2190 
2191   if ((cdm & LARGEPAGES_BIT) == 0) {
2192     cdm |= LARGEPAGES_BIT;
2193     fprintf(f, "%#lx", cdm);
2194   }
2195 
2196   fclose(f);
2197 }
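
Note that /proc/self/coredump_filter is a Linux procfs facility; on BSD the fopen() above simply fails and the function returns early. The constant it tests is presumably defined elsewhere in the file as bit 6, matching the table:

// Assumption: bit 6 ("hugetlb shared memory") per the comment block above.
#define LARGEPAGES_BIT (1 << 6)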
2198 
2199 // Large page support
2200 
2201 static size_t _large_page_size = 0;
2202 
2203 void os::large_page_init() {
2204 }
2205 
2206 
2207 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
2208   // "exec" is passed in but not used.  Creating the shared image for
2209   // the code cache doesn't have an SHM_X executable permission to check.
2210   assert(UseLargePages && UseSHM, "only for SHM large pages");
2211 
2212   key_t key = IPC_PRIVATE;
2213   char *addr;
2214 
2215   bool warn_on_failure = UseLargePages &&
2216                         (!FLAG_IS_DEFAULT(UseLargePages) ||
2217                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
2218                         );
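
The rest of reserve_memory_special() is elided here; a rough sketch of how a SysV segment of this kind is typically created and attached (shm_alloc_sketch is illustrative, not the file's code):

#include <sys/ipc.h>
#include <sys/shm.h>

// Create a private segment, attach it, and mark it for removal so it
// disappears once the last attachment is gone.
static char* shm_alloc_sketch(size_t bytes) {
  int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_R | SHM_W);
  if (shmid == -1) return NULL;
  char* addr = (char*) shmat(shmid, NULL, 0);
  shmctl(shmid, IPC_RMID, NULL);       // destroy after final detach
  return addr == (char*) -1 ? NULL : addr;
}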



