--- old/src/hotspot/cpu/x86/assembler_x86.cpp	2019-02-16 20:56:35.381822499 +0530
+++ new/src/hotspot/cpu/x86/assembler_x86.cpp	2019-02-16 20:56:35.177822494 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -7765,9 +7765,43 @@
   }
 }
 
+void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
+  emit_int8(0x5F);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_rex_vex_w_reverted();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
+  emit_int8(0x5F);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
+  emit_int8(0x5D);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_rex_vex_w_reverted();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
+  emit_int8(0x5D);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
 void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  assert(!VM_Version::supports_evex(), "");
+  assert(vector_len <= AVX_256bit, "");
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xC2);
@@ -7777,7 +7811,7 @@
 
 void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  assert(!VM_Version::supports_evex(), "");
+  assert(vector_len <= AVX_256bit, "");
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x4B);
@@ -7788,7 +7822,7 @@
 
 void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  assert(!VM_Version::supports_evex(), "");
+  assert(vector_len <= AVX_256bit, "");
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xC2);
@@ -7798,7 +7832,7 @@
 
 void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  assert(!VM_Version::supports_evex(), "");
+  assert(vector_len <= AVX_256bit, "");
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x4A);
--- old/src/hotspot/cpu/x86/assembler_x86.hpp	2019-02-16 20:56:35.777822509 +0530
+++ new/src/hotspot/cpu/x86/assembler_x86.hpp	2019-02-16 20:56:35.573822504 +0530
@@ -1934,6 +1934,11 @@
   void vsubss(XMMRegister dst, XMMRegister nds, Address src);
   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
 
+  void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src);
+  void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
+  void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src);
+  void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
+
   void shlxl(Register dst, Register src1, Register src2);
   void shlxq(Register dst, Register src1, Register src2);
 
--- old/src/hotspot/cpu/x86/x86.ad	2019-02-16 20:56:36.149822517 +0530
+++ new/src/hotspot/cpu/x86/x86.ad	2019-02-16 20:56:35.945822513 +0530
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1450,6 +1450,15 @@
       if (UseSSE < 2)
        ret_value = false;
      break;
+#ifdef _LP64
+    case Op_MaxD:
+    case Op_MaxF:
+    case Op_MinD:
+    case Op_MinF:
+      if (UseAVX < 1) // enabled for AVX only
+        ret_value = false;
+      break;
+#endif
   }
 
   return ret_value;  // Per default match rules are supported.
--- old/src/hotspot/cpu/x86/x86_64.ad	2019-02-16 20:56:36.545822527 +0530
+++ new/src/hotspot/cpu/x86/x86_64.ad	2019-02-16 20:56:36.337822522 +0530
@@ -3658,6 +3658,15 @@
 %}
 
 // Float register operands
+operand legRegF() %{
+  constraint(ALLOC_IN_RC(float_reg_legacy));
+  match(RegF);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Float register operands
 operand vlRegF() %{
   constraint(ALLOC_IN_RC(float_reg_vl));
   match(RegF);
@@ -3676,6 +3685,15 @@
 %}
 
 // Double register operands
+operand legRegD() %{
+  constraint(ALLOC_IN_RC(double_reg_legacy));
+  match(RegD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Double register operands
 operand vlRegD() %{
   constraint(ALLOC_IN_RC(double_reg_vl));
   match(RegD);
@@ -5414,6 +5432,18 @@
 %}
 
 // Load Float
+instruct MoveF2LEG(legRegF dst, regF src) %{
+  match(Set dst src);
+  format %{ "movss $dst,$src\t# if src != dst load float (4 bytes)" %}
+  ins_encode %{
+    if ($dst$$reg != $src$$reg) {
+      __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+    }
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Load Float
 instruct MoveVL2F(regF dst, vlRegF src) %{
   match(Set dst src);
   format %{ "movss $dst,$src\t! load float (4 bytes)" %}
load float (4 bytes)" %} @@ -5423,6 +5453,18 @@ ins_pipe( fpu_reg_reg ); %} +// Load Float +instruct MoveLEG2F(regF dst, legRegF src) %{ + match(Set dst src); + format %{ "movss $dst,$src\t# if src != dst load float (4 bytes)" %} + ins_encode %{ + if ($dst$$reg != $src$$reg) { + __ movflt($dst$$XMMRegister, $src$$XMMRegister); + } + %} + ins_pipe( fpu_reg_reg ); +%} + // Load Double instruct loadD_partial(regD dst, memory mem) %{ @@ -5461,6 +5503,18 @@ %} // Load Double +instruct MoveD2LEG(legRegD dst, regD src) %{ + match(Set dst src); + format %{ "movsd $dst,$src\t# if src != dst load double (8 bytes)" %} + ins_encode %{ + if ($dst$$reg != $src$$reg) { + __ movdbl($dst$$XMMRegister, $src$$XMMRegister); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// Load Double instruct MoveVL2D(regD dst, vlRegD src) %{ match(Set dst src); format %{ "movsd $dst,$src\t! load double (8 bytes)" %} @@ -5470,6 +5524,119 @@ ins_pipe( fpu_reg_reg ); %} +// Load Double +instruct MoveLEG2D(regD dst, legRegD src) %{ + match(Set dst src); + format %{ "movsd $dst,$src\t# if src != dst load double (8 bytes)" %} + ins_encode %{ + if ($dst$$reg != $src$$reg) { + __ movdbl($dst$$XMMRegister, $src$$XMMRegister); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// Following pseudo code describes the algorithm for max[FD]: +// Min algorithm is on similar lines +// btmp = (b < +0.0) ? a : b +// atmp = (b < +0.0) ? b : a +// Tmp = Max_Float(atmp , btmp) +// Res = (atmp == NaN) ? atmp : Tmp + +// max = java.lang.Math.max(float a, float b) +instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ + predicate(UseAVX > 0); + match(Set dst (MaxF a b)); + effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); + format %{ + "blendvps $btmp,$b,$a,$b \n\t" + "blendvps $atmp,$a,$b,$b \n\t" + "vmaxss $tmp,$atmp,$btmp \n\t" + "cmpps.unordered $btmp,$atmp,$atmp \n\t" + "blendvps $dst,$tmp,$atmp,$btmp \n\t" + %} + ins_encode %{ + int vector_len = Assembler::AVX_128bit; + __ blendvps($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, vector_len); + __ blendvps($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $b$$XMMRegister, vector_len); + __ vmaxss($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); + __ cmpps($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); + __ blendvps($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); + %} + ins_pipe( pipe_slow ); +%} + + +// max = java.lang.Math.max(double a, double b) +instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ + predicate(UseAVX > 0); + match(Set dst (MaxD a b)); + effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp); + format %{ + "blendvpd $btmp,$b,$a,$b \n\t" + "blendvpd $atmp,$a,$b,$b \n\t" + "vmaxsd $tmp,$atmp,$btmp \n\t" + "cmppd.unordered $btmp,$atmp,$atmp \n\t" + "blendvpd $dst,$tmp,$atmp,$btmp \n\t" + %} + ins_encode %{ + int vector_len = Assembler::AVX_128bit; + __ blendvpd($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, vector_len); + __ blendvpd($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $b$$XMMRegister, vector_len); + __ vmaxsd($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister); + __ cmppd($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len); + __ blendvpd($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len); + %} + ins_pipe( pipe_slow ); +%} + + +// min = 
+instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{
+  predicate(UseAVX > 0);
+  match(Set dst (MinF a b));
+  effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
+  format %{
+    "blendvps $atmp,$a,$b,$a \n\t"
+    "blendvps $btmp,$b,$a,$a \n\t"
+    "vminss $tmp,$atmp,$btmp \n\t"
+    "cmpps.unordered $btmp,$atmp,$atmp \n\t"
+    "blendvps $dst,$tmp,$atmp,$btmp \n\t"
+  %}
+  ins_encode %{
+    int vector_len = Assembler::AVX_128bit;
+    __ blendvps($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, vector_len);
+    __ blendvps($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $a$$XMMRegister, vector_len);
+    __ vminss($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister);
+    __ cmpps($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len);
+    __ blendvps($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// min = java.lang.Math.min(double a, double b)
+instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{
+  predicate(UseAVX > 0);
+  match(Set dst (MinD a b));
+  effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
+  format %{
+    "blendvpd $atmp,$a,$b,$a \n\t"
+    "blendvpd $btmp,$b,$a,$a \n\t"
+    "vminsd $tmp,$atmp,$btmp \n\t"
+    "cmppd.unordered $btmp,$atmp,$atmp \n\t"
+    "blendvpd $dst,$tmp,$atmp,$btmp \n\t"
+  %}
+  ins_encode %{
+    int vector_len = Assembler::AVX_128bit;
+    __ blendvpd($atmp$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, vector_len);
+    __ blendvpd($btmp$$XMMRegister, $b$$XMMRegister, $a$$XMMRegister, $a$$XMMRegister, vector_len);
+    __ vminsd($tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister);
+    __ cmppd($btmp$$XMMRegister, $atmp$$XMMRegister, $atmp$$XMMRegister, Assembler::_false, vector_len);
+    __ blendvpd($dst$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 // Load Effective Address
 instruct leaP8(rRegP dst, indOffset8 mem) %{
--- old/test/hotspot/jtreg/compiler/intrinsics/math/TestFpMinMaxIntrinsics.java	2019-02-16 20:56:36.941822536 +0530
+++ new/test/hotspot/jtreg/compiler/intrinsics/math/TestFpMinMaxIntrinsics.java	2019-02-16 20:56:36.737822531 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Arm Limited. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,10 @@
 *      -Xcomp -XX:-TieredCompilation
 *      -XX:CompileOnly=java/lang/Math
 *      compiler.intrinsics.math.TestFpMinMaxIntrinsics
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
+ *      -XX:-TieredCompilation -XX:CompileThresholdScaling=0.1
+ *      -XX:CompileCommand=print,compiler/intrinsics/math/TestFpMinMaxIntrinsics.*Test*
+ *      compiler.intrinsics.math.TestFpMinMaxIntrinsics
 */
 
 package compiler.intrinsics.math;
 
@@ -64,24 +68,46 @@
         // a    b    min  max
         { fPos, fPos, fPos, fPos },
         { fPos, fNeg, fNeg, fPos },
+        { fPosZero, fNegZero, fNegZero, fPosZero },
+        { fNegZero, fPosZero, fNegZero, fPosZero },
         { fNegZero, fNegZero, fNegZero, fNegZero },
+        { fPos, fPosInf, fPos, fPosInf },
         { fNeg, fNegInf, fNegInf, fNeg },
+        { fPos, fNaN, fNaN, fNaN },
+        { fNaN, fPos, fNaN, fNaN },
+        { fNeg, fNaN, fNaN, fNaN },
+        { fNaN, fNeg, fNaN, fNaN },
+
+        { fPosInf, fNaN, fNaN, fNaN },
+        { fNaN, fPosInf, fNaN, fNaN },
         { fNegInf, fNaN, fNaN, fNaN },
+        { fNaN, fNegInf, fNaN, fNaN }
     };
 
     private static final double[][] d_cases = {
         // a    b    min  max
         { dPos, dPos, dPos, dPos },
         { dPos, dNeg, dNeg, dPos },
+        { dPosZero, dNegZero, dNegZero, dPosZero },
+        { dNegZero, dPosZero, dNegZero, dPosZero },
         { dNegZero, dNegZero, dNegZero, dNegZero },
+        { dPos, dPosInf, dPos, dPosInf },
         { dNeg, dNegInf, dNegInf, dNeg },
+        { dPos, dNaN, dNaN, dNaN },
+        { dNaN, dPos, dNaN, dNaN },
+        { dNeg, dNaN, dNaN, dNaN },
+        { dNaN, dNeg, dNaN, dNaN },
+
+        { dPosInf, dNaN, dNaN, dNaN },
+        { dNaN, dPosInf, dNaN, dNaN },
         { dNegInf, dNaN, dNaN, dNaN },
+        { dNaN, dNegInf, dNaN, dNaN }
     };
 
     private static void fTest(float[] row) {
@@ -109,7 +135,7 @@
             return;
         }
         if (min != row[2] || max != row[3]) {
-            throw new AssertionError("Unexpected result of double min/max" +
+            throw new AssertionError("Unexpected result of double min/max: " +
                     "a = " + row[0] + ", b = " + row[1] + ", " +
                     "result = (" + min + ", " + max + "), " +
                     "expected = (" + row[2] + ", " + row[3] + ")");
@@ -117,9 +143,10 @@
     }
 
     public static void main(String[] args) {
-        Arrays.stream(f_cases).forEach(TestFpMinMaxIntrinsics::fTest);
-        Arrays.stream(d_cases).forEach(TestFpMinMaxIntrinsics::dTest);
-        System.out.println("PASS");
+        for (int i = 0; i < 10_000; i++) {
+            Arrays.stream(f_cases).forEach(TestFpMinMaxIntrinsics::fTest);
+            Arrays.stream(d_cases).forEach(TestFpMinMaxIntrinsics::dTest);
+        }
     }
 }
--- /dev/null	2019-01-21 08:42:35.435473376 +0530
+++ new/test/micro/org/openjdk/bench/java/math/FpMinMaxBenchmark.java	2019-02-16 20:56:37.109822540 +0530
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2014, Oracle America, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of Oracle nor the names of its contributors may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.openjdk.bench.java.math;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Thread)
+public class FpMinMaxBenchmark {
+
+    public final int TESTSIZE = 2048;
+
+    public float[] FargV1;
+    public float[] FargV2;
+    public double[] DargV1;
+    public double[] DargV2;
+
+    public final float[] FspecialVals = {
+        0.0f, -0.0f, Float.NaN, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY};
+
+    public final double[] DspecialVals = {
+        0.0, -0.0, Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY};
+
+    @Setup(Level.Trial)
+    public void BmSetup() {
+        Random r = new Random();
+        FargV1 = new float[TESTSIZE];
+        FargV2 = new float[TESTSIZE];
+        DargV1 = new double[TESTSIZE];
+        DargV2 = new double[TESTSIZE];
+
+        for (int i = 0; i < TESTSIZE; i++) {
+            FargV1[i] = r.nextFloat() * 100f;
+            FargV2[i] = r.nextFloat() * 100f;
+            DargV1[i] = r.nextDouble() * 100f;
+            DargV2[i] = r.nextDouble() * 100f;
+        }
+
+        for (int i = 0, j = 0; i < TESTSIZE; i += 100) {
+            FargV1[i] = FspecialVals[j++ % FspecialVals.length];
+            DargV1[i] = DspecialVals[j++ % DspecialVals.length];
+        }
+
+        for (int i = 50, j = 0; i < TESTSIZE; i += 50) {
+            FargV2[i] = FspecialVals[j++ % FspecialVals.length];
+            DargV2[i] = DspecialVals[j++ % DspecialVals.length];
+        }
+    }
+
+    @Benchmark
+    @OperationsPerInvocation(TESTSIZE * TESTSIZE)
+    public void testMaxF(Blackhole bh) {
+        float Res = 0.0f;
+        for (int i = 0; i < TESTSIZE * TESTSIZE; i++)
+            Res = Math.max(FargV1[i % TESTSIZE], FargV2[i % TESTSIZE]);
+        bh.consume(Res);
+    }
+
+    @Benchmark
+    @OperationsPerInvocation(TESTSIZE * TESTSIZE)
+    public void testMaxD(Blackhole bh) {
+        double Res = 0.0;
+        for (int i = 0; i < TESTSIZE * TESTSIZE; i++)
+            Res = Math.max(DargV1[i % TESTSIZE], DargV2[i % TESTSIZE]);
+        bh.consume(Res);
+    }
+
+    @Benchmark
+    @OperationsPerInvocation(TESTSIZE * TESTSIZE)
+    public void testMinF(Blackhole bh) {
+        float Res = 0.0f;
+        for (int i = 0; i < TESTSIZE * TESTSIZE; i++)
+            Res = Math.min(FargV1[i % TESTSIZE], FargV2[i % TESTSIZE]);
+        bh.consume(Res);
+    }
+
+    @Benchmark
+    @OperationsPerInvocation(TESTSIZE * TESTSIZE)
+    public void testMinD(Blackhole bh) {
+        double Res = 0.0;
+        for (int i = 0; i < TESTSIZE * TESTSIZE; i++)
+            Res = Math.min(DargV1[i % TESTSIZE], DargV2[i % TESTSIZE]);
+        bh.consume(Res);
+    }
+}
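
Editor's note, not part of the patch: the maxF_reg/minF_reg rules above are easiest to audit against Java semantics with a scalar model at hand. The sketch below is a hypothetical Java rendering of the maxF_reg sequence only; the class MaxBlendSketch and the helpers blend and maxss are invented for illustration, and they merely approximate the documented behaviour of blendvps (select on the sign bit of the mask element) and maxss (return the second operand when the inputs compare equal or either is NaN). It is a model of the five emitted instructions, not the intrinsic itself.

public class MaxBlendSketch {

    // Model of VBLENDVPS dst, src1, src2, mask: pick src2 where the mask
    // element's sign bit is set (this includes -0.0), otherwise src1.
    static float blend(float src1, float src2, float mask) {
        return (Float.floatToRawIntBits(mask) < 0) ? src2 : src1;
    }

    // Model of (V)MAXSS src1, src2: returns src2 when either input is NaN or
    // the inputs compare equal, otherwise the larger input.
    static float maxss(float src1, float src2) {
        if (Float.isNaN(src1) || Float.isNaN(src2) || src1 == src2) {
            return src2;
        }
        return src1 > src2 ? src1 : src2;
    }

    // Mirrors maxF_reg: order the operands by the sign of b so +0.0 wins over
    // -0.0, take the hardware-style max, then re-select atmp if it is NaN.
    static float maxF(float a, float b) {
        float btmp = blend(b, a, b);            // blendvps $btmp,$b,$a,$b
        float atmp = blend(a, b, b);            // blendvps $atmp,$a,$b,$b
        float tmp  = maxss(atmp, btmp);         // vmaxss   $tmp,$atmp,$btmp
        return Float.isNaN(atmp) ? atmp : tmp;  // cmpps.unordered + blendvps
    }

    public static void main(String[] args) {
        // Spot-check the corner cases the intrinsic must preserve.
        float[][] cases = { { 0.0f, -0.0f }, { -0.0f, 0.0f },
                            { 1.0f, Float.NaN }, { Float.NaN, -1.0f } };
        for (float[] c : cases) {
            float got = maxF(c[0], c[1]);
            float want = Math.max(c[0], c[1]);
            if (Float.compare(got, want) != 0) {
                throw new AssertionError("mismatch for (" + c[0] + ", " + c[1] + ")");
            }
        }
        System.out.println("sketch agrees with Math.max on the spot checks");
    }
}

Running java MaxBlendSketch exercises the same -0.0/+0.0 and NaN pairs that the new rows in TestFpMinMaxIntrinsics cover; the min rules follow the mirrored pattern, ordering by the sign of a instead of b.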