330
// Thread-accounting globals for the Solaris port.
// _os_thread_limit: cap on VM-created OS threads (0 here; presumably set
// during initialization -- the assignment site is not visible in this chunk).
jint os::Solaris::_os_thread_limit = 0;
// Running count of VM-created OS threads; volatile because it is accessed
// from multiple threads without a lock.
volatile jint os::Solaris::_os_thread_count = 0;
333
334 julong os::available_memory() {
335 return Solaris::available_memory();
336 }
337
338 julong os::Solaris::available_memory() {
339 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
340 }
341
// Total physical memory in bytes; filled in by initialize_system_info().
julong os::Solaris::_physical_memory = 0;
343
344 julong os::physical_memory() {
345 return Solaris::physical_memory();
346 }
347
// Epoch for elapsedTime()/elapsed_counter(); presumably captured at VM
// start-up -- the initialization site is not visible in this chunk.
static hrtime_t first_hrtime = 0;
// gethrtime() reports nanoseconds, i.e. a fixed 1e9 ticks-per-second rate.
static const hrtime_t hrtime_hz = 1000*1000*1000;
// States of the int spin lock guarding max_hrtime in oldgetTimeNanos().
const int LOCK_BUSY = 1;
const int LOCK_FREE = 0;
const int LOCK_INVALID = -1;
// Highest hrtime value observed so far; advanced monotonically so that
// getTimeNanos() never moves backwards.
static volatile hrtime_t max_hrtime = 0;
static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress
355
356
357 void os::Solaris::initialize_system_info() {
358 set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
359 _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
360 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
361 }
362
363 int os::active_processor_count() {
364 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
365 pid_t pid = getpid();
366 psetid_t pset = PS_NONE;
367 // Are we running in a processor set or is there any processor set around?
368 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
369 uint_t pset_cpus;
370 // Query the number of cpus available to us.
371 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
372 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
373 _processors_online = pset_cpus;
374 return pset_cpus;
1347 "thr_setspecific: out of swap space");
1348 } else {
1349 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1350 "(%s)", strerror(errno)));
1351 }
1352 } else {
1353 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1354 }
1355 }
1356
1357 // This function could be called before TLS is initialized, for example, when
1358 // VM receives an async signal or when VM causes a fatal error during
1359 // initialization. Return NULL if thr_getspecific() fails.
1360 void* os::thread_local_storage_at(int index) {
1361 // %%% this is used only in threadLocalStorage.cpp
1362 void* r = NULL;
1363 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1364 }
1365
1366
1367 // gethrtime can move backwards if read from one cpu and then a different cpu
1368 // getTimeNanos is guaranteed to not move backward on Solaris
1369 // local spinloop created as faster for a CAS on an int than
1370 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
1371 // supported on sparc v8 or pre supports_cx8 intel boxes.
1372 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
1373 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
1374 inline hrtime_t oldgetTimeNanos() {
1375 int gotlock = LOCK_INVALID;
1376 hrtime_t newtime = gethrtime();
1377
1378 for (;;) {
1379 // grab lock for max_hrtime
1380 int curlock = max_hrtime_lock;
1381 if (curlock & LOCK_BUSY) continue;
1382 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
1383 if (newtime > max_hrtime) {
1384 max_hrtime = newtime;
1385 } else {
1386 newtime = max_hrtime;
1387 }
1388 // release lock
1389 max_hrtime_lock = LOCK_FREE;
1390 return newtime;
1391 }
1392 }
// gethrtime can move backwards if read from one cpu and then a different cpu.
// getTimeNanos is guaranteed to not move backward on Solaris: it tracks the
// maximum hrtime seen so far in max_hrtime and never returns less than that.
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev) return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obsv" is >= now then
    // we should return "obsv". If the CAS failed and now > obsv > prev then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obsv. We use (c). No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obsv value. That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obsv and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    // No 64-bit CAS on this hardware: use the int-spin-lock fallback.
    return oldgetTimeNanos();
  }
}
1420
1421 // Time since start-up in seconds to a fine granularity.
1422 // Used by VMSelfDestructTimer and the MemProfiler.
1423 double os::elapsedTime() {
1424 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1425 }
1426
1427 jlong os::elapsed_counter() {
1428 return (jlong)(getTimeNanos() - first_hrtime);
1429 }
1430
// Frequency of the elapsed counter: gethrtime() ticks are nanoseconds,
// so the rate is a fixed 1e9 ticks per second (hrtime_hz).
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1434
1435 // Return the real, user, and system times in seconds from an
1436 // arbitrary fixed point in the past.
1437 bool os::getTimesSecs(double* process_real_time,
1438 double* process_user_time,
|
330
// Thread-accounting globals for the Solaris port.
// _os_thread_limit: cap on VM-created OS threads (0 here; presumably set
// during initialization -- the assignment site is not visible in this chunk).
jint os::Solaris::_os_thread_limit = 0;
// Running count of VM-created OS threads; volatile because it is accessed
// from multiple threads without a lock.
volatile jint os::Solaris::_os_thread_count = 0;
333
334 julong os::available_memory() {
335 return Solaris::available_memory();
336 }
337
338 julong os::Solaris::available_memory() {
339 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
340 }
341
// Total physical memory in bytes; filled in by initialize_system_info().
julong os::Solaris::_physical_memory = 0;
343
344 julong os::physical_memory() {
345 return Solaris::physical_memory();
346 }
347
// Epoch for elapsedTime()/elapsed_counter(); presumably captured at VM
// start-up -- the initialization site is not visible in this chunk.
static hrtime_t first_hrtime = 0;
// gethrtime() reports nanoseconds, i.e. a fixed 1e9 ticks-per-second rate.
static const hrtime_t hrtime_hz = 1000*1000*1000;
// Highest hrtime value observed so far; advanced by CAS in getTimeNanos()
// so that reported time never moves backwards.
static volatile hrtime_t max_hrtime = 0;
351
352
353 void os::Solaris::initialize_system_info() {
354 set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
355 _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
356 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
357 }
358
359 int os::active_processor_count() {
360 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
361 pid_t pid = getpid();
362 psetid_t pset = PS_NONE;
363 // Are we running in a processor set or is there any processor set around?
364 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
365 uint_t pset_cpus;
366 // Query the number of cpus available to us.
367 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
368 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
369 _processors_online = pset_cpus;
370 return pset_cpus;
1343 "thr_setspecific: out of swap space");
1344 } else {
1345 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1346 "(%s)", strerror(errno)));
1347 }
1348 } else {
1349 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1350 }
1351 }
1352
1353 // This function could be called before TLS is initialized, for example, when
1354 // VM receives an async signal or when VM causes a fatal error during
1355 // initialization. Return NULL if thr_getspecific() fails.
1356 void* os::thread_local_storage_at(int index) {
1357 // %%% this is used only in threadLocalStorage.cpp
1358 void* r = NULL;
1359 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1360 }
1361
1362
// gethrtime() should be monotonic according to the documentation,
// but is known not to guarantee this on virtualized platforms.
// getTimeNanos() must be guaranteed not to move backwards, so we
// are forced to add a check here: max_hrtime tracks the maximum hrtime
// observed so far and we never return less than it.
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  // NOTE(review): plain (non-Atomic) read of a 64-bit volatile; assumes this
  // load is atomic on all supported targets -- confirm for 32-bit platforms,
  // where a torn read could be observed.
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev; // same or retrograde time;
  }
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant"); // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv". If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv. We use (c). No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value. That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
1389
1390 // Time since start-up in seconds to a fine granularity.
1391 // Used by VMSelfDestructTimer and the MemProfiler.
1392 double os::elapsedTime() {
1393 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1394 }
1395
1396 jlong os::elapsed_counter() {
1397 return (jlong)(getTimeNanos() - first_hrtime);
1398 }
1399
// Frequency of the elapsed counter: gethrtime() ticks are nanoseconds,
// so the rate is a fixed 1e9 ticks per second (hrtime_hz).
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1403
1404 // Return the real, user, and system times in seconds from an
1405 // arbitrary fixed point in the past.
1406 bool os::getTimesSecs(double* process_real_time,
1407 double* process_user_time,
|