/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include <fcntl.h>
#include <kstat.h>
#include <procfs.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <sys/sysinfo.h>
#include <sys/lwp.h>
#include <pthread.h>
#include <utmpx.h>
#include <dlfcn.h>
#include <sys/loadavg.h>
#include <jni.h>
#include "jvm.h"
#include "com_sun_management_internal_OperatingSystemImpl.h"

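// Per-CPU state: the kstat handle for this CPU's cpu_stat instance plus the
// idle/total tick counts and load ratio from the previous snapshot, used to
// compute the load as a delta between successive reads.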
typedef struct {
    kstat_t  *kstat;
    uint64_t  last_idle;
    uint64_t  last_total;
    double    last_ratio;
} cpuload_t;

static cpuload_t   *cpu_loads = NULL;
static unsigned int num_cpus;
static kstat_ctl_t *kstat_ctrl = NULL;

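// Determine the number of online CPUs and bind each cpu_loads[] entry to the
// matching cpu_stat kstat in the current kstat chain.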
static void map_cpu_kstat_counters() {
    kstat_t *kstat;
    long     ncpus;
    int      i;

    // Get the number of online CPU(s); assume a single CPU if the query fails
    if ((ncpus = sysconf(_SC_NPROCESSORS_ONLN)) < 1) {
        ncpus = 1;
    }
    num_cpus = (unsigned int) ncpus;

    // Data structure for saving the per-CPU load
    if ((cpu_loads = calloc(num_cpus, sizeof(cpuload_t))) == NULL) {
        return;
    }

    // Get the kstat cpu_stat counters for every CPU
    // (loop over the kstat chain to find the cpu_stat instances)
    i = 0;
    for (kstat = kstat_ctrl->kc_chain; kstat != NULL; kstat = kstat->ks_next) {
        if (strncmp(kstat->ks_module, "cpu_stat", 8) == 0) {

            if (kstat_read(kstat_ctrl, kstat, NULL) == -1) {
                // Failed to initialize kstat for this CPU so ignore it
                continue;
            }

            if (i == num_cpus) {
                // Found more cpu_stats than reported CPUs
                break;
            }

            cpu_loads[i++].kstat = kstat;
        }
    }
}

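// Open the kstat control handle and map the per-CPU counters once.
// Returns 0 on success, -1 if kstat_open() failed.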
static int init_cpu_kstat_counters() {
    static int initialized = 0;

    // Concurrent calls to this function are prevented by the lock taken in
    // the caller, get_cpu_load().
    if (!initialized) {
        if ((kstat_ctrl = kstat_open()) != NULL) {
            map_cpu_kstat_counters();
            initialized = 1;
        }
    }
    return initialized ? 0 : -1;
}

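// Refresh the kstat chain; if it has changed (for example because CPUs were
// brought online or offline), drop the old mapping and rebuild it.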
static void update_cpu_kstat_counters() {
    if (kstat_chain_update(kstat_ctrl) != 0) {
        free(cpu_loads);
        map_cpu_kstat_counters();
    }
}

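// Read the current cpu_stat snapshot for one CPU into *cpu_stat.
// Returns 0 on success, -1 if there is no kstat handle or the read failed.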
int read_cpustat(cpuload_t *load, cpu_stat_t *cpu_stat) {
    if (load->kstat == NULL) {
        // No kstat handle for this CPU
        return -1;
    }
    if (kstat_read(kstat_ctrl, load->kstat, cpu_stat) == -1) {
        // Disable this handle for now; a kstat chain update is likely to
        // happen next time and will re-map the counters
        load->kstat = NULL;
        return -1;
    }
    return 0;
}

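// Return the load of CPU 'n' as the fraction of non-idle ticks over the
// interval since the previous call (busy / total), or -1.0 on error. If no
// ticks have elapsed, the previously computed ratio is returned.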
double get_single_cpu_load(unsigned int n) {
    cpuload_t  *load;
    cpu_stat_t  cpu_stat;
    uint_t     *usage;
    uint64_t    c_idle;
    uint64_t    c_total;
    uint64_t    d_idle;
    uint64_t    d_total;
    int         i;

    if (n >= num_cpus) {
        return -1.0;
    }

    load = &cpu_loads[n];
    if (read_cpustat(load, &cpu_stat) < 0) {
        return -1.0;
    }

    usage   = cpu_stat.cpu_sysinfo.cpu;
    c_idle  = usage[CPU_IDLE];

    for (c_total = 0, i = 0; i < CPU_STATES; i++) {
        c_total += usage[i];
    }

    // Calculate diff against previous snapshot
    d_idle  = c_idle - load->last_idle;
    d_total = c_total - load->last_total;

    // Update only if the counters have moved since last time
    if (d_total > 0) {
        // Save current values for next time around
        load->last_idle  = c_idle;
        load->last_total = c_total;
        load->last_ratio = (double) (d_total - d_idle) / d_total;
    }

    return load->last_ratio;
}

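// Read 's' bytes at offset 'o' from the file at 'path' into 'info'.
// Returns 0 on success, -1 if the file cannot be opened or the read is short.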
int get_info(const char *path, void *info, size_t s, off_t o) {
    int fd;
    int ret = 0;
    if ((fd = open(path, O_RDONLY)) < 0) {
        return -1;
    }
    if (pread(fd, info, s, o) != s) {
        ret = -1;
    }
    close(fd);
    return ret;
}

#define MIN(a, b)           (((a) < (b)) ? (a) : (b))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/**
 * Return the cpu load (0-1) for CPU number 'which', or the average load over
 * all CPUs if 'which' is -1. Returns -1.0 if the counters are unavailable.
 */
double get_cpu_load(int which) {
    double load = 0.0;

    pthread_mutex_lock(&lock);
    if (init_cpu_kstat_counters() == 0) {

        update_cpu_kstat_counters();

        if (which == -1) {
            unsigned int i;
            double       t;

            for (t = 0.0, i = 0; i < num_cpus; i++) {
                t += get_single_cpu_load(i);
            }

            // Cap the total system load at 1.0
            load = MIN((t / num_cpus), 1.0);
        } else {
            load = MIN(get_single_cpu_load(which), 1.0);
        }
    } else {
        load = -1.0;
    }
    pthread_mutex_unlock(&lock);

    return load;
}

/**
 * Return the cpu load (0-1) for the current process (i.e. the JVM),
 * or -1.0 if the get_info() call failed
 */
double get_process_load(void) {
    psinfo_t info;

    // Get the percentage of "recent cpu usage" from all the LWPs in the JVM's
    // process. pr_pctcpu is a 16-bit binary fraction: a value between 0.0 and
    // 1.0 multiplied by 0x8000, so 0x8000 represents 100%.
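    // Example: pr_pctcpu == 0x2000 yields 0x2000 / (double) 0x8000 = 0.25,
    // i.e. 25% recent CPU usage.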
    if (get_info("/proc/self/psinfo", &info.pr_pctcpu, sizeof(info.pr_pctcpu),
                 offsetof(psinfo_t, pr_pctcpu)) == 0) {
        return (double) info.pr_pctcpu / 0x8000;
    }
    return -1.0;
}

JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getCpuLoad0
(JNIEnv *env, jobject dummy)
{
    return get_cpu_load(-1);
}

JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getProcessCpuLoad0
(JNIEnv *env, jobject dummy)
{
    return get_process_load();
}

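// Per-CPU load and the host's configured CPU count are not implemented in
// this port; the stubs below simply return -1.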
JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getSingleCpuLoad0
(JNIEnv *env, jobject mbean, jint cpu_number)
{
    return -1.0;
}

JNIEXPORT jint JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getHostConfiguredCpuCount0
(JNIEnv *env, jobject mbean)
{
    return -1;
}