1 /*
   2 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
   3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 *
   5 * This code is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 only, as
   7 * published by the Free Software Foundation.
   8 *
   9 * This code is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 * version 2 for more details (a copy is included in the LICENSE file that
  13 * accompanied this code).
  14 *
  15 * You should have received a copy of the GNU General Public License version
  16 * 2 along with this work; if not, write to the Free Software Foundation,
  17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 *
  19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 * or visit www.oracle.com if you need additional information or have any
  21 * questions.
  22 *
  23 */
  24 
  25 #include "prims/jvm.h"
  26 #include "runtime/frame.inline.hpp"
  27 #include "runtime/os.hpp"
  28 #include "utilities/vmError.hpp"
  29 
  30 #include <unistd.h>
  31 #include <sys/resource.h>
  32 #include <sys/utsname.h>
  33 
  34 
  35 // Check core dump limit and report possible place where core can be found
  36 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  37   int n;
  38   struct rlimit rlim;
  39   bool success;
  40 
  41   n = get_core_path(buffer, bufferSize);
  42 
  43   if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
  44     jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (may not exist)", current_process_id());
  45     success = true;
  46   } else {
  47     switch(rlim.rlim_cur) {
  48       case RLIM_INFINITY:
  49         jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id());
  50         success = true;
  51         break;
  52       case 0:
  53         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
  54         success = false;
  55         break;
  56       default:
  57         jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", current_process_id(), (unsigned long)(rlim.rlim_cur >> 10));
  58         success = true;
  59         break;
  60     }
  61   }
  62   VMError::report_coredump_status(buffer, success);
  63 }
  64 
  65 address os::get_caller_pc(int n) {
  66 #ifdef _NMT_NOINLINE_
  67   n ++;
  68 #endif
  69   frame fr = os::current_frame();
  70   while (n > 0 && fr.pc() &&
  71     !os::is_first_C_frame(&fr) && fr.sender_pc()) {
  72     fr = os::get_sender_for_C_frame(&fr);
  73     n --;
  74   }
  75   if (n == 0) {
  76     return fr.pc();
  77   } else {
  78     return NULL;
  79   }
  80 }
  81 
  82 int os::get_last_error() {
  83   return errno;
  84 }
  85 
  86 bool os::is_debugger_attached() {
  87   // not implemented
  88   return false;
  89 }
  90 
  91 void os::wait_for_keypress_at_exit(void) {
  92   // don't do anything on posix platforms
  93   return;
  94 }
  95 
  96 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
  97 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
  98 // rather than unmapping and remapping the whole chunk to get requested alignment.
  99 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
 100   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 101       "Alignment must be a multiple of allocation granularity (page size)");
 102   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 103 
 104   size_t extra_size = size + alignment;
 105   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 106 
 107   char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
 108 
 109   if (extra_base == NULL) {
 110     return NULL;
 111   }
 112 
 113   // Do manual alignment
 114   char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
 115 
 116   // [  |                                       |  ]
 117   // ^ extra_base
 118   //    ^ extra_base + begin_offset == aligned_base
 119   //     extra_base + begin_offset + size       ^
 120   //                       extra_base + extra_size ^
 121   // |<>| == begin_offset
 122   //                              end_offset == |<>|
 123   size_t begin_offset = aligned_base - extra_base;
 124   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 125 
 126   if (begin_offset > 0) {
 127       os::release_memory(extra_base, begin_offset);
 128   }
 129 
 130   if (end_offset > 0) {
 131       os::release_memory(extra_base + begin_offset + size, end_offset);
 132   }
 133 
 134   return aligned_base;
 135 }
 136 
 137 void os::Posix::print_load_average(outputStream* st) {
 138   st->print("load average:");
 139   double loadavg[3];
 140   os::loadavg(loadavg, 3);
 141   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 142   st->cr();
 143 }
 144 
 145 void os::Posix::print_rlimit_info(outputStream* st) {
 146   st->print("rlimit:");
 147   struct rlimit rlim;
 148 
 149   st->print(" STACK ");
 150   getrlimit(RLIMIT_STACK, &rlim);
 151   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 152   else st->print("%uk", rlim.rlim_cur >> 10);
 153 
 154   st->print(", CORE ");
 155   getrlimit(RLIMIT_CORE, &rlim);
 156   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 157   else st->print("%uk", rlim.rlim_cur >> 10);
 158 
 159   //Isn't there on solaris
 160 #ifndef TARGET_OS_FAMILY_solaris
 161   st->print(", NPROC ");
 162   getrlimit(RLIMIT_NPROC, &rlim);
 163   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 164   else st->print("%d", rlim.rlim_cur);
 165 #endif
 166 
 167   st->print(", NOFILE ");
 168   getrlimit(RLIMIT_NOFILE, &rlim);
 169   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 170   else st->print("%d", rlim.rlim_cur);
 171 
 172   st->print(", AS ");
 173   getrlimit(RLIMIT_AS, &rlim);
 174   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 175   else st->print("%uk", rlim.rlim_cur >> 10);
 176   st->cr();
 177 }
 178 
 179 void os::Posix::print_uname_info(outputStream* st) {
 180   // kernel
 181   st->print("uname:");
 182   struct utsname name;
 183   uname(&name);
 184   st->print(name.sysname); st->print(" ");
 185   st->print(name.release); st->print(" ");
 186   st->print(name.version); st->print(" ");
 187   st->print(name.machine);
 188   st->cr();
 189 }
 190 
// Determine whether the virtual memory available to this process is bounded.
// On success stores the usable limit (in bytes) in *limit and returns true;
// returns false when memory should be treated as effectively unlimited.
// On 32-bit platforms the rlimit value is further bounded by what is
// actually allocatable, found via binary search with is_allocatable().
bool os::has_allocatable_memory_limit(julong* limit) {
  struct rlimit rlim;
  int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
  // if there was an error when calling getrlimit, assume that there is no limitation
  // on virtual memory.
  bool result;
  if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
    result = false;
  } else {
    *limit = (julong)rlim.rlim_cur;
    result = true;
  }
#ifdef _LP64
  // On 64-bit platforms the rlimit (if any) is the only constraint we check.
  return result;
#else
  // arbitrary virtual space limit for 32 bit Unices found by testing. If
  // getrlimit above returned a limit, bound it with this limit. Otherwise
  // directly use it.
  const julong max_virtual_limit = (julong)3800*M;
  if (result) {
    *limit = MIN2(*limit, max_virtual_limit);
  } else {
    *limit = max_virtual_limit;
  }

  // bound by actually allocatable memory. The algorithm uses two bounds, an
  // upper and a lower limit. The upper limit is the current highest amount of
  // memory that could not be allocated, the lower limit is the current highest
  // amount of memory that could be allocated.
  // The algorithm iteratively refines the result by halving the difference
  // between these limits, updating either the upper limit (if that value could
  // not be allocated) or the lower limit (if the that value could be allocated)
  // until the difference between these limits is "small".

  // the minimum amount of memory we care about allocating.
  const julong min_allocation_size = M;

  julong upper_limit = *limit;

  // first check a few trivial cases
  if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
    // the whole limit is allocatable (or too small to refine): use it as is.
    *limit = upper_limit;
  } else if (!is_allocatable(min_allocation_size)) {
    // we found that not even min_allocation_size is allocatable. Return it
    // anyway. There is no point to search for a better value any more.
    *limit = min_allocation_size;
  } else {
    // perform the binary search.
    julong lower_limit = min_allocation_size;
    while ((upper_limit - lower_limit) > min_allocation_size) {
      // probe the midpoint, aligned down to a min_allocation_size boundary.
      julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
      temp_limit = align_size_down_(temp_limit, min_allocation_size);
      if (is_allocatable(temp_limit)) {
        lower_limit = temp_limit;
      } else {
        upper_limit = temp_limit;
      }
    }
    // lower_limit is the largest probed value known to be allocatable.
    *limit = lower_limit;
  }
  return true;
#endif
}
 254 
 255 const char* os::get_current_directory(char *buf, size_t buflen) {
 256   return getcwd(buf, buflen);
 257 }
 258 
 259 FILE* os::open(int fd, const char* mode) {
 260   return ::fdopen(fd, mode);
 261 }
 262 
// Crash protection may only be constructed on the WatcherThread: the
// signal handler path (check_crash_protection) consults WatcherThread
// state to decide whether to divert a crash back into call().
os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
}
 266 
/*
 * See the caveats for this class in os_posix.hpp
 * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
 * method and returns false. If none of the signals are raised, returns true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
      "crash_protection already set?");

  // savemask == 1: the current signal mask is saved so that the
  // siglongjmp in restore() reinstates it after a crash.
  if (sigsetjmp(_jmpbuf, 1) == 0) {
    // make sure we can see in the signal handler that we have crash protection
    // installed
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
    // and clear the crash protection
    WatcherThread::watcher_thread()->set_crash_protection(NULL);
    return true;
  }
  // this happens when we siglongjmp() back
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return false;
}
 291 
// Jump back to the sigsetjmp point in call(), which then returns false.
// Invoked from the signal handler via check_crash_protection() when the
// protected callback crashed with SIGSEGV / SIGBUS.
void os::WatcherThreadCrashProtection::restore() {
  assert(WatcherThread::watcher_thread()->has_crash_protection(),
      "must have crash protection");

  siglongjmp(_jmpbuf, 1);
}
 298 
 299 void os::WatcherThreadCrashProtection::check_crash_protection(int sig,
 300     Thread* thread) {
 301 
 302   if (thread != NULL &&
 303       thread->is_Watcher_thread() &&
 304       WatcherThread::watcher_thread()->has_crash_protection()) {
 305 
 306     if (sig == SIGSEGV || sig == SIGBUS) {
 307       WatcherThread::watcher_thread()->crash_protection()->restore();
 308     }
 309   }
 310 }