398
399 jint os::Solaris::_os_thread_limit = 0;
400 volatile jint os::Solaris::_os_thread_count = 0;
401
402 julong os::available_memory() {
403 return Solaris::available_memory();
404 }
405
406 julong os::Solaris::available_memory() {
407 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
408 }
409
410 julong os::Solaris::_physical_memory = 0;
411
412 julong os::physical_memory() {
413 return Solaris::physical_memory();
414 }
415
416 static hrtime_t first_hrtime = 0;
417 static const hrtime_t hrtime_hz = 1000*1000*1000;
418 const int LOCK_BUSY = 1;
419 const int LOCK_FREE = 0;
420 const int LOCK_INVALID = -1;
421 static volatile hrtime_t max_hrtime = 0;
422 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress
423
424
425 void os::Solaris::initialize_system_info() {
426 set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
427 _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
428 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
429 }
430
431 int os::active_processor_count() {
432 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
433 pid_t pid = getpid();
434 psetid_t pset = PS_NONE;
435 // Are we running in a processor set or is there any processor set around?
436 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
437 uint_t pset_cpus;
438 // Query the number of cpus available to us.
439 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
440 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
441 _processors_online = pset_cpus;
442 return pset_cpus;
1517 "thr_setspecific: out of swap space");
1518 } else {
1519 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1520 "(%s)", strerror(errno)));
1521 }
1522 } else {
1523 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1524 }
1525 }
1526
1527 // This function could be called before TLS is initialized, for example, when
1528 // VM receives an async signal or when VM causes a fatal error during
1529 // initialization. Return NULL if thr_getspecific() fails.
1530 void* os::thread_local_storage_at(int index) {
1531 // %%% this is used only in threadLocalStorage.cpp
1532 void* r = NULL;
1533 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1534 }
1535
1536
1537 // gethrtime can move backwards if read from one cpu and then a different cpu
1538 // getTimeNanos is guaranteed to not move backward on Solaris
1539 // local spinloop created as faster for a CAS on an int than
1540 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
1541 // supported on sparc v8 or pre supports_cx8 intel boxes.
1542 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
1543 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
1544 inline hrtime_t oldgetTimeNanos() {
1545 int gotlock = LOCK_INVALID;
1546 hrtime_t newtime = gethrtime();
1547
1548 for (;;) {
1549 // grab lock for max_hrtime
1550 int curlock = max_hrtime_lock;
1551 if (curlock & LOCK_BUSY) continue;
1552 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
1553 if (newtime > max_hrtime) {
1554 max_hrtime = newtime;
1555 } else {
1556 newtime = max_hrtime;
1557 }
1558 // release lock
1559 max_hrtime_lock = LOCK_FREE;
1560 return newtime;
1561 }
1562 }
1563 // gethrtime can move backwards if read from one cpu and then a different cpu
1564 // getTimeNanos is guaranteed to not move backward on Solaris
// Monotonic nanosecond clock: returns gethrtime() clamped so it never
// moves backwards, using a CAS-maintained global high-water mark
// (max_hrtime) on cx8-capable hardware and the spin-locked fallback
// otherwise.
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev) return prev;   // same or retrograde time;
    // Try to publish 'now' as the new high-water mark.
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs". If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs. We use (c). No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value. That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    // Hardware cannot CAS a 64-bit jlong; use the spin-locked path.
    return oldgetTimeNanos();
  }
}
1590
1591 // Time since start-up in seconds to a fine granularity.
1592 // Used by VMSelfDestructTimer and the MemProfiler.
1593 double os::elapsedTime() {
1594 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1595 }
1596
1597 jlong os::elapsed_counter() {
1598 return (jlong)(getTimeNanos() - first_hrtime);
1599 }
1600
// Frequency of elapsed_counter() ticks: gethrtime() counts nanoseconds,
// so the rate is a fixed 10^9 per second.
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1604
1605 // Return the real, user, and system times in seconds from an
1606 // arbitrary fixed point in the past.
1607 bool os::getTimesSecs(double* process_real_time,
1608 double* process_user_time,
|
398
399 jint os::Solaris::_os_thread_limit = 0;
400 volatile jint os::Solaris::_os_thread_count = 0;
401
// Physical memory currently available, in bytes; delegates to the
// Solaris-specific implementation.
julong os::available_memory() {
  return Solaris::available_memory();
}
405
406 julong os::Solaris::available_memory() {
407 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
408 }
409
410 julong os::Solaris::_physical_memory = 0;
411
// Total physical memory, in bytes, as cached by initialize_system_info().
julong os::physical_memory() {
  return Solaris::physical_memory();
}
415
416 static hrtime_t first_hrtime = 0;
417 static const hrtime_t hrtime_hz = 1000*1000*1000;
418 static volatile hrtime_t max_hrtime = 0;
419
420
// Cache basic system facts at startup: configured and online CPU counts
// and the total amount of physical memory.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));  // CPUs configured
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN); // CPUs online now
  // Total RAM = physical page count * page size.
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
426
427 int os::active_processor_count() {
428 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
429 pid_t pid = getpid();
430 psetid_t pset = PS_NONE;
431 // Are we running in a processor set or is there any processor set around?
432 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
433 uint_t pset_cpus;
434 // Query the number of cpus available to us.
435 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
436 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
437 _processors_online = pset_cpus;
438 return pset_cpus;
1513 "thr_setspecific: out of swap space");
1514 } else {
1515 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1516 "(%s)", strerror(errno)));
1517 }
1518 } else {
1519 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1520 }
1521 }
1522
1523 // This function could be called before TLS is initialized, for example, when
1524 // VM receives an async signal or when VM causes a fatal error during
1525 // initialization. Return NULL if thr_getspecific() fails.
1526 void* os::thread_local_storage_at(int index) {
1527 // %%% this is used only in threadLocalStorage.cpp
1528 void* r = NULL;
1529 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1530 }
1531
1532
1533 // gethrtime() should be monotonic according to the documentation,
1534 // but some virtualized platforms are known to break this guarantee.
1535 // getTimeNanos() must be guaranteed not to move backwards, so we
1536 // are forced to add a check here.
// Monotonic nanosecond clock: returns gethrtime() clamped so it never
// moves backwards, ratcheting a global high-water mark (max_hrtime)
// forward with a single 64-bit CAS.
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  // NOTE(review): plain (non-Atomic::load) read of a 64-bit global; this
  // is tear-free only on targets with atomic 64-bit loads -- presumably
  // this code path is 64-bit only now. Confirm; the earlier version used
  // Atomic::load here for 32-bit x86.
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev; // same or retrograde time;
  }
  // Try to publish 'now' as the new high-water mark.
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant"); // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv". If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv. We use (c). No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value. That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
1559
1560 // Time since start-up in seconds to a fine granularity.
1561 // Used by VMSelfDestructTimer and the MemProfiler.
1562 double os::elapsedTime() {
1563 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1564 }
1565
1566 jlong os::elapsed_counter() {
1567 return (jlong)(getTimeNanos() - first_hrtime);
1568 }
1569
// Frequency of elapsed_counter() ticks: gethrtime() counts nanoseconds,
// so the rate is a fixed 10^9 per second.
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1573
1574 // Return the real, user, and system times in seconds from an
1575 // arbitrary fixed point in the past.
1576 bool os::getTimesSecs(double* process_real_time,
1577 double* process_user_time,
|