src/os/solaris/vm/os_solaris.cpp

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                    { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg)   { memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                      { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg)   { memset(cv, 0, sizeof(cond_t)); return 0; }
}
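These stubs only make sense as fallbacks. As a sketch of how a runtime could prefer the real libc entry point and fall back to the stub above (an assumption for illustration, not this file's actual resolution code; mutex_t and lwp_mutex_init come from the surrounding file), consider:

#include <dlfcn.h>

typedef int (*mutex_init_fn)(mutex_t*, int, void*);

// Resolve the libc implementation at runtime if present; otherwise
// use the zero-initializing static stub defined above. The symbol
// name "_lwp_mutex_init" is an illustrative assumption.
static mutex_init_fn resolve_mutex_init() {
  void* sym = dlsym(RTLD_DEFAULT, "_lwp_mutex_init");
  return (sym != NULL) ? (mutex_init_fn)sym : &lwp_mutex_init;
}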

static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

// Thread Local Storage
// This is common to all Solaris platforms, so it is defined here
// in this common file. The declarations are in the os_cpu
// threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};

#ifndef PRODUCT
  #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))

int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss + _tcacheHit;
  // print_cr() appends the newline itself, so no trailing "\n" is needed.
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
  #undef _PCT
#endif // PRODUCT

Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread* thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
               sp >= thread->_stack_base - thread->_stack_size) ||
              is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
    _get_thread_cache[index] = thread;
  }
  return thread;
}


static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update the thread cache with the new thread if setting on thread create,
  // or with the NO_CACHED_THREAD (zeroed) sentinel if resetting the thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}

void ThreadLocalStorage::pd_init() {
  for (int i = 0; i < _pd_cache_size; i++) {
    _get_thread_cache[i] = NO_CACHED_THREAD;
  }
}

// Invalidate all the caches (happens to be the same as pd_init).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD

// END Thread Local Storage
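For orientation, here is a sketch of the fast path that the cache above serves. It is illustrative only: the real pd_cache_index() lives in the per-platform os_cpu files, the mask below assumes _pd_cache_size is a power of two, and self_raw_id() is the assumed getter paired with set_self_raw_id() used earlier.

inline Thread* demo_get_thread_fast(uintptr_t raw_id) {
  int ix = (int)(raw_id & (ThreadLocalStorage::_pd_cache_size - 1));  // assumed hash
  Thread* t = ThreadLocalStorage::_get_thread_cache[ix];
  // The zeroed NO_CACHED_THREAD sentinel (rather than NULL) makes this
  // field read unconditionally safe: a sentinel's raw id is 0 and, raw
  // ids being assumed nonzero, never matches a live thread, so the
  // compare simply falls through to the slow path.
  if (t->self_raw_id() == raw_id) {
    return t;                                  // direct-mapped cache hit
  }
  // Miss or collision: resolve slowly and repopulate the cache slot.
  return ThreadLocalStorage::get_thread_via_cache_slowly(raw_id, ix);
}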

static inline size_t adjust_stack_size(address base, size_t size) {
  if ((ssize_t)size < 0) {
    // 4759953: Compensate for ridiculous stack size.
    size = max_intx;
  }
  if (size > (size_t)base) {
    // 4812466: Make sure size doesn't allow the stack to wrap the address space.
    size = (size_t)base;
  }
  return size;
}
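A standalone driver makes the two clamps concrete (a re-implementation for illustration; max_intx_demo stands in for HotSpot's max_intx):

#include <cstdio>
#include <sys/types.h>

typedef unsigned char* demo_address;
static const size_t max_intx_demo = ((size_t)-1) >> 1;   // stand-in for max_intx

static size_t adjust_stack_size_demo(demo_address base, size_t size) {
  if ((ssize_t)size < 0)   size = max_intx_demo;  // sign bit set: "ridiculous" size
  if (size > (size_t)base) size = (size_t)base;   // keep base - size >= 0
  return size;
}

int main() {
  demo_address base = (demo_address)0x1000;
  // (size_t)-4096 has the sign bit set, so it is first clamped to
  // max_intx_demo and then capped at base so the stack cannot wrap
  // below address zero: this prints 0x1000.
  printf("0x%zx\n", adjust_stack_size_demo(base, (size_t)-4096));
  return 0;
}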

static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  // Check the return value before consuming the (possibly garbage) result.
  assert(retval == 0, "incorrect return value from thr_stksegment");
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp - st.ss_size, "Invalid stack size returned");
  return st;
}
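Hypothetical usage of the result, under the convention the asserts above encode: ss_sp is the high end (the base) of a downward-growing stack, so the valid range is [base - size, base).

stack_t st = get_stack_info();
address stack_base  = (address)st.ss_sp;
address stack_limit = stack_base - st.ss_size;
size_t  headroom    = (size_t)((address)&st - stack_limit);  // bytes left below a local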
[ ... intervening lines elided ... ]

  }
  delete osthread;
}

void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}


intx os::current_thread_id() {
  return (intx)thr_self();
}

static pid_t _initial_pid = 0;

int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}

int os::allocate_thread_local_storage() {
  // %%%       in Win32 this allocates a memory segment pointed to by a
  //           register.  Dan Stein can implement a similar feature in
  //           Solaris.  Alternatively, the VM can do the same thing
  //           explicitly: malloc some storage and keep the pointer in a
  //           register (which is part of the thread's context), or keep it
  //           in TLS.
  // %%%       In current versions of Solaris, thr_self and TSD can
  //           be accessed via short sequences of displaced indirections.
  //           The value of thr_self is available as %g7(36).
  //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  //           assuming that the current thread already has a value bound to k.
  //           It may be worth experimenting with such access patterns,
  //           and later having the parameters formally exported from a Solaris
  //           interface.  I think, however, that it will be faster to
  //           maintain the invariant that %g2 always contains the
  //           JavaThread in Java code, and have stubs simply
  //           treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  int rslt = thr_keycreate(&tk, NULL);
  if (rslt != 0) {
    // thr_keycreate() reports errors through its return value, not errno.
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(rslt)));
  }
  return int(tk);
}

void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if (pthread_key_delete((pthread_key_t) index)) {
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
  // }
}

// libthread's allocation for tsd_common is a version-specific
// small number - the point is that NO swap space is available.
#define SMALLINT 32
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  int rslt = thr_setspecific((thread_key_t)index, value);
  if (rslt != 0) {
    // thr_setspecific() reports errors through its return value, not errno.
    if (rslt == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
                            "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(rslt)));
    }
  } else {
    ThreadLocalStorage::set_thread_in_slot((Thread *) value);
  }
}

// This function could be called before TLS is initialized, for example, when
// the VM receives an async signal or when the VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
void* os::thread_local_storage_at(int index) {
  // %%% this is used only in threadLocalStorage.cpp
  void* r = NULL;
  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
}
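For comparison, the same allocate / put / get / free cycle with portable POSIX thread-specific data looks like this (a simplified sketch; the VM's fatal() and vm_exit_out_of_memory() paths are replaced by ordinary error prints):

#include <pthread.h>
#include <cstdio>
#include <cstring>

static pthread_key_t demo_key;

int main() {
  int rc = pthread_key_create(&demo_key, NULL);        // allocate_thread_local_storage
  if (rc != 0) {
    // Like thr_keycreate(), pthread_key_create() returns the error code.
    fprintf(stderr, "pthread_key_create: %s\n", strerror(rc));
    return 1;
  }
  int value = 42;
  rc = pthread_setspecific(demo_key, &value);          // thread_local_storage_at_put
  if (rc != 0) {
    fprintf(stderr, "pthread_setspecific: %s\n", strerror(rc));
    return 1;
  }
  int* r = (int*)pthread_getspecific(demo_key);        // thread_local_storage_at
  printf("TLS value: %d\n", r != NULL ? *r : -1);      // NULL when nothing is bound
  pthread_key_delete(demo_key);                        // free_thread_local_storage
  return 0;
}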


// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we
// are forced to add a check here.
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev;   // same or retrograde time
  }
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prev then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now,
  // or (c) just return obsv.  We use (c).  No loop is required, although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.  That's entirely benign -- it admits no new orderings compared ...

[ ... remainder of listing elided ... ]
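The same single-CAS monotonicity policy can be restated portably. A minimal sketch with std::atomic (an illustration, not HotSpot code; raw_clock_ns() is an assumed stand-in for gethrtime()):

#include <atomic>
#include <cstdint>

extern int64_t raw_clock_ns();            // assumed raw, possibly retrograde source

static std::atomic<int64_t> g_max_time{0};

int64_t monotonic_now_ns() {
  const int64_t now = raw_clock_ns();
  int64_t prev = g_max_time.load(std::memory_order_relaxed);
  if (now <= prev) {
    return prev;                          // same or retrograde reading
  }
  // One CAS, no loop: on failure, compare_exchange_strong leaves the
  // freshly observed value in prev, and returning it is the benign
  // policy (c) described in the comment above.
  if (g_max_time.compare_exchange_strong(prev, now)) {
    return now;                           // we installed the new maximum
  }
  return prev;                            // another thread won the race
}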