/*
 * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package com.sun.scenario.effect.impl.state;

import java.nio.FloatBuffer;
import com.sun.javafx.geom.Rectangle;
import com.sun.javafx.geom.transform.BaseTransform;
import com.sun.scenario.effect.Color4f;
import com.sun.scenario.effect.Effect;
import com.sun.scenario.effect.FilterContext;
import com.sun.scenario.effect.ImageData;
import com.sun.scenario.effect.impl.EffectPeer;
import com.sun.scenario.effect.impl.Renderer;
/**
 * A helper class for defining a one-dimensional linear convolution kernel
 * for either the LinearConvolve or LinearConvolveShadow shaders.
 * This class is abstract and must be subclassed for specific linear
 * convolutions.
 */
public abstract class LinearConvolveKernel {
    public static final int MAX_KERNEL_SIZE = 128;

    public enum PassType {
        /**
         * The kernel on this pass will be applied horizontally with
         * the kernel centered symmetrically around each pixel.
         * The specific conditions indicated by this type are:
         * <ul>
         * <li>The kernel is an odd size {@code (2*k+1)}
         * <li>The data for destination pixel {@code (x,y)} is taken from
         *     pixels {@code (x-k,y)} through {@code (x+k,y)} with the weights
         *     applied in that same order.
         * <li>If the bounds of the source image are {@code (x,y,w,h)} then
         *     the bounds of the destination will be {@code (x-k,y,w+2*k,h)}.
         * </ul>
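         * <p>
         * A concrete example of the bounds arithmetic above: a kernel of
         * size 5 ({@code k = 2}) applied to a source with bounds
         * {@code (10, 20, 100, 50)} produces destination bounds
         * {@code (10-2, 20, 100+2*2, 50) = (8, 20, 104, 50)}.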
         */
        HORIZONTAL_CENTERED,

        /**
         * The kernel on this pass will be applied vertically with
         * the kernel centered symmetrically around each pixel.
         * The specific conditions indicated by this type are:
         * <ul>
         * <li>The kernel is an odd size {@code (2*k+1)}
         * <li>The data for destination pixel {@code (x,y)} is taken from
         *     pixels {@code (x,y-k)} through {@code (x,y+k)} with the weights
         *     applied in that same order.
         * <li>If the bounds of the source image are {@code (x,y,w,h)} then
         *     the bounds of the destination will be {@code (x,y-k,w,h+2*k)}.
         * </ul>
         */
        VERTICAL_CENTERED,

        /**
         * The kernel on this pass can be applied in any direction or with
         * any kind of offset.
         * No assumptions are made about the offset and delta of the kernel
         * vector.
         */
        GENERAL_VECTOR,
    };

    /**
     * Returns the peer sample count for a given kernel size.  Peers are
     * only defined to operate on a few specific convolution kernel sizes.
     * If, for example, peers are defined only for kernel sizes of 8 and 16
     * and a given effect has a linear convolution kernel with 5 weights,
     * then the peer for size 8 will be used.  The buffer of weights must
     * then be padded out to that size with 0s so that the shader constant
     * pool is fully initialized, and the extra unneeded convolution samples
     * are ignored because of their 0 weights.
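     * <p>
     * Concretely, the rounding performed by this method pads sizes below 32
     * up to a multiple of 4, and larger sizes up to a multiple of 32:
     * <pre>
     *     getPeerSize(5)  == 8
     *     getPeerSize(31) == 32
     *     getPeerSize(33) == 64
     * </pre>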
     *
     * @param ksize the number of computed convolution kernel weights
     * @return the number of convolution weights which will be applied by
     *         the associated peer.
     */
    public static int getPeerSize(int ksize) {
        if (ksize < 32) return ((ksize + 3) & (~3));
        if (ksize <= MAX_KERNEL_SIZE) return ((ksize + 31) & (~31));
        throw new RuntimeException("No peer available for kernel size: "+ksize);
    }

    /**
     * Returns true if this is a LinearConvolveShadow operation, or false
     * if the operation is a regular LinearConvolve.
     *
     * @return true if this is a Shadow operation
     */
    public boolean isShadow() {
        return false;
    }

    /**
     * Returns the number of linear convolution passes the algorithm must make
     * to complete its work.  Most subclasses will use only 1 or 2 passes
     * (typically broken down into a horizontal pass and a vertical pass as
     * necessary).
     *
     * @return the number of passes to be made
     */
    public abstract int getNumberOfPasses();

    /**
     * Returns true if the entire operation of this linear convolution
     * would have no effect on the source data.
     *
     * @return true if the operation is a NOP
     */
    public boolean isNop() {
        return false;
    }

    /**
     * Returns true if the operation of a particular pass of this linear
     * convolution would have no effect on the source data.
     *
     * @param pass the algorithm pass being performed
     * @return true if the given pass is a NOP
     */
    public boolean isNop(int pass) {
        return false;
    }

    /**
     * Returns the {@link PassType} that indicates the assumptions that
     * can be made in optimizing the application of the kernel on this
     * pass.
     *
     * @param pass the algorithm pass being performed
     * @return the {@link PassType} that describes the kernel vector for
     *         this pass
     */
    public PassType getPassType(int pass) {
        return PassType.GENERAL_VECTOR;
    }

    /**
     * Returns the size of the output image needed for the given input
     * image dimensions and a given pass of the algorithm.
     *
     * @param srcdimension the bounds of the input image
     * @param pass the algorithm pass being performed
     * @return the bounds of the result image
     */
    public abstract Rectangle getResultBounds(Rectangle srcdimension, int pass);

    /**
     * Returns the size of the scaled result image needed to hold the output
     * for the given input image dimensions and a given pass of the algorithm.
     * The image may be further scaled after the shader operation completes
     * to obtain the final result bounds.
     * This value is only of use to the actual shader so that it can know
     * exactly how much room to allocate for the shader result.
     *
     * @param srcdimension the bounds of the input image
     * @param pass the algorithm pass being performed
     * @return the bounds of the result image
     */
    public Rectangle getScaledResultBounds(Rectangle srcdimension, int pass) {
        return getResultBounds(srcdimension, pass);
    }

    /**
     * Returns an array of 4 floats used to initialize a float4 Shader
     * constant with the relative starting location of the first weight
     * in the convolution kernel and the incremental offset between each
     * sample to be weighted and convolved.  The values are stored in
     * the array in the following order:
     * <pre>
     *     shadervec.x = vector[0] = incdx // X offset between subsequent samples
     *     shadervec.y = vector[1] = incdy // Y offset between subsequent samples
     *     shadervec.z = vector[2] = startdx // X offset to first convolution sample
     *     shadervec.w = vector[3] = startdy // Y offset to first convolution sample
     * </pre>
     * These values are used in the shader loop as follows:
     * <pre>
     *     samplelocation = outputpixellocation.xy + shadervec.zw;
     *     for (each weight) {
     *         sum += weight * sample(samplelocation.xy);
     *         samplelocation.xy += shadervec.xy;
     *     }
     * </pre>
     *
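     * As a purely illustrative sketch (the actual values are computed by the
     * concrete subclass for each pass), a HORIZONTAL_CENTERED pass using an
     * odd kernel of size {@code 2*k+1}, with offsets expressed as fractions
     * of a source texture of native width {@code W}, might supply:
     * <pre>
     *     [ incdx, incdy, startdx, startdy ] = [ 1.0/W, 0.0, -k/W, 0.0 ]
     * </pre>
     *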
     * @param srcnativedimensions the native dimensions (including unused
     *                            padding) of the input source
     * @param transform the transform being applied to the filter operation
     * @param pass the pass of the algorithm being performed
     * @return an array of 4 floats representing
     *         {@code [ incdx, incdy, startdx, startdy ]}
     */
    public abstract float[] getVector(Rectangle srcnativedimensions,
                                      BaseTransform transform, int pass);

    /**
     * Returns the size of the kernel for a given pass.
     *
     * @param pass the pass of the algorithm being performed
     * @return the size of the kernel for the given pass
     */
    public abstract int getKernelSize(int pass);

    /**
     * Returns the size of the kernel used in the shader for a given pass,
     * taking into account the scaling specified by the
     * {@link #getPow2ScaleX()} and {@link #getPow2ScaleY()} methods.
     *
     * @param pass the pass of the algorithm being performed
     * @return the size of the kernel for the scaled operation
     */
    public int getScaledKernelSize(int pass) {
        return getKernelSize(pass);
    }

    /**
     * Returns the number of powers of 2 by which the source image should be
     * scaled along the X axis.
     * Positive numbers mean to scale the image larger by the indicated
     * number of factors of 2.0.
     * Negative numbers mean to scale the image smaller by the indicated
     * number of factors of 0.5.
     * Overall the image will be scaled by {@code pow(2.0, getPow2ScaleX())}.
     * <p>
     * The kernel specified by the {@link #getWeights(int)} method will be
     * relative to the scale factor recommended by this method.
     * This scaling allows larger kernels to be reduced in size to save
     * computation if the resolution reduction will not alter the quality
     * of the convolution (e.g. for blur convolutions).
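     * <p>
     * For example, a return value of {@code -1} means the source image is
     * scaled by {@code pow(2.0, -1) = 0.5} (half size) along the X axis
     * before the convolution is applied, so a blur kernel can cover the same
     * visual extent with roughly half as many weights at the reduced
     * resolution.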
     *
     * @return the power of 2.0 by which to scale the source image along the
     *         X axis.
     */
    public int getPow2ScaleX() {
        return 0;
    }

    /**
     * Returns the number of powers of 2 by which the source image should be
     * scaled along the Y axis.
     * Positive numbers mean to scale the image larger by the indicated
     * number of factors of 2.0.
     * Negative numbers mean to scale the image smaller by the indicated
     * number of factors of 0.5.
     * Overall the image will be scaled by {@code pow(2.0, getPow2ScaleY())}.
     * <p>
     * The kernel specified by the {@link #getWeights(int)} method will be
     * relative to the scale factor recommended by this method.
     * This scaling allows larger kernels to be reduced in size to save
     * computation if the resolution reduction will not alter the quality
     * of the convolution (e.g. for blur convolutions).
     *
     * @return the power of 2.0 by which to scale the source image along the
     *         Y axis.
     */
    public int getPow2ScaleY() {
        return 0;
    }

    /**
     * Returns a {@link FloatBuffer} containing the kernel weights for the
     * given pass, padded out to the required size as specified by the
     * {@link #getPeerSize(int)} method.
     *
     * @param pass the pass of the algorithm being performed
     * @return a {@code FloatBuffer} containing the kernel convolution weights
     */
    public abstract FloatBuffer getWeights(int pass);

    /**
     * Returns the maximum number of valid float4 elements that should be
     * referenced from the buffer returned by getWeights() for the given pass.
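     * <p>
     * For example, a pass with a scaled kernel size of 5 has a peer size of 8
     * (see {@link #getPeerSize(int)}), so its weights buffer holds
     * {@code 8 / 4 = 2} valid float4 elements.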
     *
     * @param pass the pass of the algorithm being performed
     * @return the maximum number of valid float4 elements in the weights buffer
     */
    public int getWeightsArrayLength(int pass) {
        int ksize = getScaledKernelSize(pass);
        int psize = getPeerSize(ksize);
        return psize / 4;
    }

    static final float[] BLACK_COMPONENTS =
        Color4f.BLACK.getPremultipliedRGBComponents();

    /**
     * Returns the color components to be used for a linearly convolved shadow.
     * Only the LinearConvolveShadow shader uses this method.  State
     * subclasses that are only intended to be used with the LinearConvolve
     * shader do not need to override this method.
     *
     * @param pass the pass of the algorithm being performed
     * @return the color components for the shadow color for the given pass
     */
    public float[] getShadowColorComponents(int pass) {
        return BLACK_COMPONENTS;
    }

    public EffectPeer getPeer(Renderer r, FilterContext fctx, int pass) {
        if (isNop(pass)) {
            return null;
        }
        int ksize = getScaledKernelSize(pass);
        int psize = getPeerSize(ksize);
        String opname = isShadow() ? "LinearConvolveShadow" : "LinearConvolve";
        return r.getPeerInstance(fctx, opname, psize);
    }

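    /**
     * Scales a clip rectangle by the indicated powers of 2 along each axis.
     * When scaling down, the origin is rounded down and the width and height
     * are rounded up so that the scaled clip still covers the original area.
     *
     * @param clip the rectangle to be scaled, or null
     * @param xpow2scales the power of 2 scale along the X axis
     * @param ypow2scales the power of 2 scale along the Y axis
     * @return the scaled rectangle, or the original rectangle if no scaling
     *         was required
     */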
    public Rectangle transform(Rectangle clip,
                               int xpow2scales, int ypow2scales)
    {
        // Modeled after Renderer.transform(fctx, img, hscale, vscale)
        if (clip == null || (xpow2scales | ypow2scales) == 0) {
            return clip;
        }
        clip = new Rectangle(clip);
        if (xpow2scales < 0) {
            xpow2scales = -xpow2scales;
            clip.width = (clip.width + (1 << xpow2scales) - 1) >> xpow2scales;
            clip.x >>= xpow2scales;
        } else if (xpow2scales > 0) {
            clip.width = clip.width << xpow2scales;
            clip.x <<= xpow2scales;
        }
        if (ypow2scales < 0) {
            ypow2scales = -ypow2scales;
            clip.height = (clip.height + (1 << ypow2scales) - 1) >> ypow2scales;
            clip.y >>= ypow2scales;
        } else if (ypow2scales > 0) {
            clip.height = clip.height << ypow2scales;
            clip.y <<= ypow2scales;
        }
        return clip;
    }

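    /**
     * Filters the first of the supplied inputs by applying each pass of this
     * linear convolution with the appropriate LinearConvolve or
     * LinearConvolveShadow peer, scaling the intermediate images by the
     * power of 2 factors recommended by the peers as needed and growing the
     * output clip by the scaled kernel padding so that the passes produce
     * fully padded results.
     *
     * @param effect the effect being performed
     * @param fctx the filter context for the operation
     * @param transform the transform being applied to the filter operation
     * @param outputClip the clip rectangle for the output, or null
     * @param inputs the input images; only the first is used
     * @return the filtered result image
     */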
    public ImageData filterImageDatas(Effect effect,
                                      FilterContext fctx,
                                      BaseTransform transform,
                                      Rectangle outputClip,
                                      ImageData... inputs)
    {
        ImageData src = inputs[0];
        src.addref();
        if (isNop()) {
            return src;
        }
        Rectangle approxBounds = inputs[0].getUntransformedBounds();
        int approxW = approxBounds.width;
        int approxH = approxBounds.height;
        Renderer r = Renderer.getRenderer(fctx, effect, approxW, approxH);
        EffectPeer peer0 = getPeer(r, fctx, 0);
        EffectPeer peer1 = getPeer(r, fctx, 1);
        int hscale = 0;
        int vscale = 0;
        if (peer0 instanceof LinearConvolvePeer) {
            hscale = ((LinearConvolvePeer) peer0).getPow2ScaleX(this);
        }
        if (peer1 instanceof LinearConvolvePeer) {
            vscale = ((LinearConvolvePeer) peer1).getPow2ScaleY(this);
        }
        Rectangle filterClip = outputClip;
        if ((hscale | vscale) != 0) {
            src = r.transform(fctx, src, hscale, vscale);
            if (!src.validate(fctx)) {
                src.unref();
                return src;
            }
            filterClip = transform(outputClip, hscale, vscale);
        }
        if (filterClip != null) {
            // The inputClip was already grown by the padding when the
            // inputs were filtered, but now we need to make sure that
            // the peers pass out padded results from the already padded
            // input data, so we grow the clip here by the size of the
            // scaled kernel padding.
            int hgrow = getScaledKernelSize(0) / 2;
            int vgrow = getScaledKernelSize(1) / 2;
            if ((hgrow | vgrow) != 0) {
                if (filterClip == outputClip) {
                    filterClip = new Rectangle(outputClip);
                }
                filterClip.grow(hgrow, vgrow);
            }
        }
        if (peer0 != null) {
            peer0.setPass(0);
            ImageData res = peer0.filter(effect, transform, filterClip, src);
            src.unref();
            src = res;
            if (!src.validate(fctx)) {
                src.unref();
                return src;
            }
        }

        if (peer1 != null) {
            peer1.setPass(1);
            ImageData res = peer1.filter(effect, transform, filterClip, src);
            src.unref();
            src = res;
            if (!src.validate(fctx)) {
                src.unref();
                return src;
            }
        }

        if ((hscale | vscale) != 0) {
            src = r.transform(fctx, src, -hscale, -vscale);
        }
        return src;
    }
}