/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcUtil.hpp"

// Catch-all file for utility classes
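// (AdaptiveWeightedAverage, AdaptivePaddedAverage, AdaptivePaddedNoZeroDevAverage
// and LinearLeastSquareFit below; their declarations live in gcUtil.hpp.)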

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have
  // OLD_THRESHOLD/weight samples.
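  // For example, with OLD_THRESHOLD == 100 (see gcUtil.hpp), the weight used
  // for the first few samples ramps down as 100, 50, 33, 25, ... (percent)
  // until it falls below weight(), at which point weight() takes over via
  // the MAX2() below.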
  unsigned count_weight = 0;

  // Avoid division by zero if the counter wraps (7158457)
  if (!is_old()) {
    count_weight = OLD_THRESHOLD/count();
  }

  unsigned adaptive_weight = (MAX2(weight(), count_weight));

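  // exp_avg() (see gcUtil.hpp) blends the old average and the new sample,
  // with adaptive_weight as the percentage weight given to the new sample.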
  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();
  assert(count() != 0,
         "Wraparound -- history would be incorrectly discarded");

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

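// The padded average is average() plus padding() times deviation(), where
// deviation() is a decaying average of |sample - average()|.  It serves as a
// conservative estimate, roughly in the spirit of "mean plus N standard
// deviations" but using mean absolute deviation.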
void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero;
    // otherwise the previous deviation is carried forward.
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here.  A denominator that is
    // nearly 0 should be avoided.

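    // Standard least-squares closed form with n == _mean_x.count():
    //   slope     = (n * sum(x*y) - sum(x) * sum(y)) / (n * sum(x^2) - sum(x)^2)
    //   intercept = (sum(y) - slope * sum(x)) / n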
    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data.  If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}

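// Estimate y for a given x.  With fewer than two samples there is no fit yet,
// so fall back to the decaying mean of the y samples.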
double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0.  That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done.  Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope.  Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
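// For example (illustrative only; the fit and variable names are hypothetical),
// a policy feeding (size, cost) samples into a LinearLeastSquareFit could ask:
//
//   if (fit->decrement_will_decrease()) {
//     // shrinking size is expected to lower cost
//   } else if (fit->increment_will_decrease()) {
//     // growing size is expected to lower cost
//   }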
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}