1 /* 2 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have 23 * questions. 24 */ 25 package jdk.incubator.vector; 26 27 import java.nio.ByteBuffer; 28 import java.nio.IntBuffer; 29 import java.nio.ByteOrder; 30 import java.util.Arrays; 31 import java.util.Objects; 32 import java.util.function.BinaryOperator; 33 import java.util.function.IntUnaryOperator; 34 import java.util.function.Function; 35 import java.util.function.UnaryOperator; 36 import java.util.concurrent.ThreadLocalRandom; 37 38 import jdk.internal.misc.Unsafe; 39 import jdk.internal.vm.annotation.ForceInline; 40 41 import static jdk.incubator.vector.VectorIntrinsics.*; 42 import static jdk.incubator.vector.VectorOperators.*; 43 44 // -- This file was mechanically generated: Do not edit! 
-- // 45 46 /** 47 * A specialized {@link Vector} representing an ordered immutable sequence of 48 * {@code int} values. 49 */ 50 @SuppressWarnings("cast") // warning: redundant cast 51 public abstract class IntVector extends AbstractVector<Integer> { 52 53 IntVector() {} 54 55 static final int FORBID_OPCODE_KIND = VO_ONLYFP; 56 57 @ForceInline 58 static int opCode(Operator op) { 59 return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND); 60 } 61 @ForceInline 62 static int opCode(Operator op, int requireKind) { 63 requireKind |= VO_OPCODE_VALID; 64 return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND); 65 } 66 @ForceInline 67 static boolean opKind(Operator op, int bit) { 68 return VectorOperators.opKind(op, bit); 69 } 70 71 // Virtualized factories and operators, 72 // coded with portable definitions. 73 // These are all @ForceInline in case 74 // they need to be used performantly. 75 // The various shape-specific subclasses 76 // also specialize them by wrapping 77 // them in a call like this: 78 // return (Byte128Vector) 79 // super.bOp((Byte128Vector) o); 80 // The purpose of that is to forcibly inline 81 // the generic definition from this file 82 // into a sharply type- and size-specific 83 // wrapper in the subclass file, so that 84 // the JIT can specialize the code. 85 // The code is only inlined and expanded 86 // if it gets hot. Think of it as a cheap 87 // and lazy version of C++ templates. 88 89 // Virtualized getter 90 91 /*package-private*/ 92 abstract int[] getElements(); 93 94 // Virtualized constructors 95 96 /** 97 * Build a vector directly using my own constructor. 98 * It is an error if the array is aliased elsewhere. 99 */ 100 /*package-private*/ 101 abstract IntVector vectorFactory(int[] vec); 102 103 /** 104 * Build a mask directly using my species. 105 * It is an error if the array is aliased elsewhere. 
106 */ 107 /*package-private*/ 108 @ForceInline 109 final 110 AbstractMask<Integer> maskFactory(boolean[] bits) { 111 return vspecies().maskFactory(bits); 112 } 113 114 // Constant loader (takes dummy as vector arg) 115 interface FVOp { 116 int apply(int i); 117 } 118 119 /*package-private*/ 120 @ForceInline 121 final 122 IntVector vOp(FVOp f) { 123 int[] res = new int[length()]; 124 for (int i = 0; i < res.length; i++) { 125 res[i] = f.apply(i); 126 } 127 return vectorFactory(res); 128 } 129 130 @ForceInline 131 final 132 IntVector vOp(VectorMask<Integer> m, FVOp f) { 133 int[] res = new int[length()]; 134 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 135 for (int i = 0; i < res.length; i++) { 136 if (mbits[i]) { 137 res[i] = f.apply(i); 138 } 139 } 140 return vectorFactory(res); 141 } 142 143 // Unary operator 144 145 /*package-private*/ 146 interface FUnOp { 147 int apply(int i, int a); 148 } 149 150 /*package-private*/ 151 abstract 152 IntVector uOp(FUnOp f); 153 @ForceInline 154 final 155 IntVector uOpTemplate(FUnOp f) { 156 int[] vec = getElements(); 157 int[] res = new int[length()]; 158 for (int i = 0; i < res.length; i++) { 159 res[i] = f.apply(i, vec[i]); 160 } 161 return vectorFactory(res); 162 } 163 164 /*package-private*/ 165 abstract 166 IntVector uOp(VectorMask<Integer> m, 167 FUnOp f); 168 @ForceInline 169 final 170 IntVector uOpTemplate(VectorMask<Integer> m, 171 FUnOp f) { 172 int[] vec = getElements(); 173 int[] res = new int[length()]; 174 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 175 for (int i = 0; i < res.length; i++) { 176 res[i] = mbits[i] ? 
f.apply(i, vec[i]) : vec[i]; 177 } 178 return vectorFactory(res); 179 } 180 181 // Binary operator 182 183 /*package-private*/ 184 interface FBinOp { 185 int apply(int i, int a, int b); 186 } 187 188 /*package-private*/ 189 abstract 190 IntVector bOp(Vector<Integer> o, 191 FBinOp f); 192 @ForceInline 193 final 194 IntVector bOpTemplate(Vector<Integer> o, 195 FBinOp f) { 196 int[] res = new int[length()]; 197 int[] vec1 = this.getElements(); 198 int[] vec2 = ((IntVector)o).getElements(); 199 for (int i = 0; i < res.length; i++) { 200 res[i] = f.apply(i, vec1[i], vec2[i]); 201 } 202 return vectorFactory(res); 203 } 204 205 /*package-private*/ 206 abstract 207 IntVector bOp(Vector<Integer> o, 208 VectorMask<Integer> m, 209 FBinOp f); 210 @ForceInline 211 final 212 IntVector bOpTemplate(Vector<Integer> o, 213 VectorMask<Integer> m, 214 FBinOp f) { 215 int[] res = new int[length()]; 216 int[] vec1 = this.getElements(); 217 int[] vec2 = ((IntVector)o).getElements(); 218 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 219 for (int i = 0; i < res.length; i++) { 220 res[i] = mbits[i] ? 
f.apply(i, vec1[i], vec2[i]) : vec1[i]; 221 } 222 return vectorFactory(res); 223 } 224 225 // Ternary operator 226 227 /*package-private*/ 228 interface FTriOp { 229 int apply(int i, int a, int b, int c); 230 } 231 232 /*package-private*/ 233 abstract 234 IntVector tOp(Vector<Integer> o1, 235 Vector<Integer> o2, 236 FTriOp f); 237 @ForceInline 238 final 239 IntVector tOpTemplate(Vector<Integer> o1, 240 Vector<Integer> o2, 241 FTriOp f) { 242 int[] res = new int[length()]; 243 int[] vec1 = this.getElements(); 244 int[] vec2 = ((IntVector)o1).getElements(); 245 int[] vec3 = ((IntVector)o2).getElements(); 246 for (int i = 0; i < res.length; i++) { 247 res[i] = f.apply(i, vec1[i], vec2[i], vec3[i]); 248 } 249 return vectorFactory(res); 250 } 251 252 /*package-private*/ 253 abstract 254 IntVector tOp(Vector<Integer> o1, 255 Vector<Integer> o2, 256 VectorMask<Integer> m, 257 FTriOp f); 258 @ForceInline 259 final 260 IntVector tOpTemplate(Vector<Integer> o1, 261 Vector<Integer> o2, 262 VectorMask<Integer> m, 263 FTriOp f) { 264 int[] res = new int[length()]; 265 int[] vec1 = this.getElements(); 266 int[] vec2 = ((IntVector)o1).getElements(); 267 int[] vec3 = ((IntVector)o2).getElements(); 268 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 269 for (int i = 0; i < res.length; i++) { 270 res[i] = mbits[i] ? 
f.apply(i, vec1[i], vec2[i], vec3[i]) : vec1[i]; 271 } 272 return vectorFactory(res); 273 } 274 275 // Reduction operator 276 277 /*package-private*/ 278 abstract 279 int rOp(int v, FBinOp f); 280 @ForceInline 281 final 282 int rOpTemplate(int v, FBinOp f) { 283 int[] vec = getElements(); 284 for (int i = 0; i < vec.length; i++) { 285 v = f.apply(i, v, vec[i]); 286 } 287 return v; 288 } 289 290 // Memory reference 291 292 /*package-private*/ 293 interface FLdOp<M> { 294 int apply(M memory, int offset, int i); 295 } 296 297 /*package-private*/ 298 @ForceInline 299 final 300 <M> IntVector ldOp(M memory, int offset, 301 FLdOp<M> f) { 302 //dummy; no vec = getElements(); 303 int[] res = new int[length()]; 304 for (int i = 0; i < res.length; i++) { 305 res[i] = f.apply(memory, offset, i); 306 } 307 return vectorFactory(res); 308 } 309 310 /*package-private*/ 311 @ForceInline 312 final 313 <M> IntVector ldOp(M memory, int offset, 314 VectorMask<Integer> m, 315 FLdOp<M> f) { 316 //int[] vec = getElements(); 317 int[] res = new int[length()]; 318 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 319 for (int i = 0; i < res.length; i++) { 320 if (mbits[i]) { 321 res[i] = f.apply(memory, offset, i); 322 } 323 } 324 return vectorFactory(res); 325 } 326 327 interface FStOp<M> { 328 void apply(M memory, int offset, int i, int a); 329 } 330 331 /*package-private*/ 332 @ForceInline 333 final 334 <M> void stOp(M memory, int offset, 335 FStOp<M> f) { 336 int[] vec = getElements(); 337 for (int i = 0; i < vec.length; i++) { 338 f.apply(memory, offset, i, vec[i]); 339 } 340 } 341 342 /*package-private*/ 343 @ForceInline 344 final 345 <M> void stOp(M memory, int offset, 346 VectorMask<Integer> m, 347 FStOp<M> f) { 348 int[] vec = getElements(); 349 boolean[] mbits = ((AbstractMask<Integer>)m).getBits(); 350 for (int i = 0; i < vec.length; i++) { 351 if (mbits[i]) { 352 f.apply(memory, offset, i, vec[i]); 353 } 354 } 355 } 356 357 // Binary test 358 359 /*package-private*/ 360 
interface FBinTest {
    // Per-lane binary test: given the comparison code `cond`, the lane
    // index `i`, and the two lane values `a` and `b`, report the result.
    boolean apply(int cond, int i, int a, int b);
}

/*package-private*/
@ForceInline
final
// Portable fallback for lane-wise comparisons: applies `f` to each pair
// of corresponding lanes of this vector and `o`, collecting the boolean
// results into a mask of this vector's species.
AbstractMask<Integer> bTest(int cond,
                            Vector<Integer> o,
                            FBinTest f) {
    int[] vec1 = getElements();
    int[] vec2 = ((IntVector)o).getElements();
    boolean[] bits = new boolean[length()];
    for (int i = 0; i < length(); i++){
        bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
    }
    return maskFactory(bits);
}

/*package-private*/
@ForceInline
// Scalar dispatch for the BT_* comparison codes (statically imported
// from VectorIntrinsics).  An unknown code is a programming error, so
// it trips an AssertionError rather than a checked failure.
static boolean doBinTest(int cond, int a, int b) {
    switch (cond) {
        case BT_eq: return a == b;
        case BT_ne: return a != b;
        case BT_lt: return a < b;
        case BT_le: return a <= b;
        case BT_gt: return a > b;
        case BT_ge: return a >= b;
    }
    throw new AssertionError(Integer.toHexString(cond));
}

/*package-private*/
@Override
abstract IntSpecies vspecies();

/*package-private*/
@ForceInline
// Widen an int lane value to the 64-bit "bits" form used by the
// species-generic machinery (sign-extending long conversion).
static long toBits(int e) {
    return e;
}

/*package-private*/
@ForceInline
// Inverse of toBits: narrow the 64-bit bits form back to an int lane
// value (truncating cast).
static int fromBits(long bits) {
    return ((int)bits);
}

// Static factories (other than memory operations)

// Note: A surprising behavior in javadoc
// sometimes makes a lone /** {@inheritDoc} */
// comment drop the method altogether,
// apparently if the method mentions an
// parameter or return type of Vector<Integer>
// instead of Vector<E> as originally specified.
// Adding an empty HTML fragment appears to
// nudge javadoc into providing the desired
// inherited documentation. We use the HTML
// comment <!--workaround--> for this.
421 422 /** 423 * {@inheritDoc} <!--workaround--> 424 */ 425 @ForceInline 426 public static IntVector zero(VectorSpecies<Integer> species) { 427 IntSpecies vsp = (IntSpecies) species; 428 return VectorIntrinsics.broadcastCoerced(vsp.vectorType(), int.class, species.length(), 429 0, vsp, 430 ((bits_, s_) -> s_.rvOp(i -> bits_))); 431 } 432 433 /** 434 * Returns a vector of the same species as this one 435 * where all lane elements are set to 436 * the primitive value {@code e}. 437 * 438 * The contents of the current vector are discarded; 439 * only the species is relevant to this operation. 440 * 441 * <p> This method returns the value of this expression: 442 * {@code IntVector.broadcast(this.species(), e)}. 443 * 444 * @apiNote 445 * Unlike the similar method named {@code broadcast()} 446 * in the supertype {@code Vector}, this method does not 447 * need to validate its argument, and cannot throw 448 * {@code IllegalArgumentException}. This method is 449 * therefore preferable to the supertype method. 450 * 451 * @param e the value to broadcast 452 * @return a vector where all lane elements are set to 453 * the primitive value {@code e} 454 * @see #broadcast(VectorSpecies,long) 455 * @see Vector#broadcast(long) 456 * @see VectorSpecies#broadcast(long) 457 */ 458 public abstract IntVector broadcast(int e); 459 460 /** 461 * Returns a vector of the given species 462 * where all lane elements are set to 463 * the primitive value {@code e}. 
464 * 465 * @param species species of the desired vector 466 * @param e the value to broadcast 467 * @return a vector where all lane elements are set to 468 * the primitive value {@code e} 469 * @see #broadcast(long) 470 * @see Vector#broadcast(long) 471 * @see VectorSpecies#broadcast(long) 472 */ 473 public static IntVector broadcast(VectorSpecies<Integer> species, int e) { 474 IntSpecies vsp = (IntSpecies) species; 475 return vsp.broadcast(e); 476 } 477 478 /*package-private*/ 479 @ForceInline 480 final IntVector broadcastTemplate(int e) { 481 IntSpecies vsp = vspecies(); 482 return vsp.broadcast(e); 483 } 484 485 /** 486 * {@inheritDoc} <!--workaround--> 487 * @apiNote 488 * When working with vector subtypes like {@code IntVector}, 489 * {@linkplain #broadcast(int) the more strongly typed method} 490 * is typically selected. It can be explicitly selected 491 * using a cast: {@code v.broadcast((int)e)}. 492 * The two expressions will produce numerically identical results. 493 */ 494 @Override 495 public abstract IntVector broadcast(long e); 496 497 /** 498 * Returns a vector of the given species 499 * where all lane elements are set to 500 * the primitive value {@code e}. 501 * 502 * The {@code long} value must be accurately representable 503 * by the {@code ETYPE} of the vector species, so that 504 * {@code e==(long)(ETYPE)e}. 
505 * 506 * @param species species of the desired vector 507 * @param e the value to broadcast 508 * @return a vector where all lane elements are set to 509 * the primitive value {@code e} 510 * @throws IllegalArgumentException 511 * if the given {@code long} value cannot 512 * be represented by the vector's {@code ETYPE} 513 * @see #broadcast(VectorSpecies,int) 514 * @see VectorSpecies#checkValue(VectorSpecies,int) 515 */ 516 public static IntVector broadcast(VectorSpecies<Integer> species, long e) { 517 IntSpecies vsp = (IntSpecies) species; 518 return vsp.broadcast(e); 519 } 520 521 /*package-private*/ 522 @ForceInline 523 final IntVector broadcastTemplate(long e) { 524 return vspecies().broadcast(e); 525 } 526 527 /** 528 * Returns a vector where each lane element is set to given 529 * primitive values. 530 * <p> 531 * For each vector lane, where {@code N} is the vector lane index, the 532 * the primitive value at index {@code N} is placed into the resulting 533 * vector at lane index {@code N}. 534 * 535 * @param species species of the desired vector 536 * @param es the given primitive values 537 * @return a vector where each lane element is set to given primitive 538 * values 539 * @throws IllegalArgumentException 540 * if {@code es.length != species.length()} 541 */ 542 @ForceInline 543 @SuppressWarnings("unchecked") 544 public static IntVector fromValues(VectorSpecies<Integer> species, int... es) { 545 IntSpecies vsp = (IntSpecies) species; 546 int vlength = vsp.laneCount(); 547 VectorIntrinsics.requireLength(es.length, vlength); 548 // Get an unaliased copy and use it directly: 549 return vsp.vectorFactory(Arrays.copyOf(es, vlength)); 550 } 551 552 /** 553 * Returns a vector where the first lane element is set to the primtive 554 * value {@code e}, all other lane elements are set to the default 555 * value. 
556 * 557 * @param species species of the desired vector 558 * @param e the value 559 * @return a vector where the first lane element is set to the primitive 560 * value {@code e} 561 */ 562 // FIXME: Does this carry its weight? 563 @ForceInline 564 public static IntVector single(VectorSpecies<Integer> species, int e) { 565 return zero(species).withLane(0, e); 566 } 567 568 /** 569 * Returns a vector where each lane element is set to a randomly 570 * generated primitive value. 571 * 572 * The semantics are equivalent to calling 573 * {@link ThreadLocalRandom#nextInt()} 574 * for each lane, from first to last. 575 * 576 * @param species species of the desired vector 577 * @return a vector where each lane elements is set to a randomly 578 * generated primitive value 579 */ 580 public static IntVector random(VectorSpecies<Integer> species) { 581 IntSpecies vsp = (IntSpecies) species; 582 ThreadLocalRandom r = ThreadLocalRandom.current(); 583 return vsp.vOp(i -> nextRandom(r)); 584 } 585 private static int nextRandom(ThreadLocalRandom r) { 586 return r.nextInt(); 587 } 588 589 // Unary lanewise support 590 591 /** 592 * {@inheritDoc} <!--workaround--> 593 */ 594 public abstract 595 IntVector lanewise(VectorOperators.Unary op); 596 597 @ForceInline 598 final 599 IntVector lanewiseTemplate(VectorOperators.Unary op) { 600 if (opKind(op, VO_SPECIAL)) { 601 if (op == ZOMO) { 602 return blend(broadcast(-1), compare(NE, 0)); 603 } 604 if (op == NEG) { 605 // FIXME: Support this in the JIT. 
606 return broadcast(0).lanewiseTemplate(SUB, this); 607 } 608 } 609 int opc = opCode(op); 610 return VectorIntrinsics.unaryOp( 611 opc, getClass(), int.class, length(), 612 this, 613 UN_IMPL.find(op, opc, (opc_) -> { 614 switch (opc_) { 615 case VECTOR_OP_NEG: return v0 -> 616 v0.uOp((i, a) -> (int) -a); 617 case VECTOR_OP_ABS: return v0 -> 618 v0.uOp((i, a) -> (int) Math.abs(a)); 619 case VECTOR_OP_NOT: return v0 -> 620 v0.uOp((i, a) -> (int) ~a); 621 default: return null; 622 }})); 623 } 624 private static final 625 ImplCache<Unary,UnaryOperator<IntVector>> UN_IMPL 626 = new ImplCache<>(Unary.class, IntVector.class); 627 628 /** 629 * {@inheritDoc} <!--workaround--> 630 */ 631 @ForceInline 632 public final 633 IntVector lanewise(VectorOperators.Unary op, 634 VectorMask<Integer> m) { 635 return blend(lanewise(op), m); 636 } 637 638 // Binary lanewise support 639 640 /** 641 * {@inheritDoc} <!--workaround--> 642 * @see #lanewise(VectorOperators.Binary,int) 643 * @see #lanewise(VectorOperators.Binary,int,VectorMask) 644 */ 645 @Override 646 public abstract 647 IntVector lanewise(VectorOperators.Binary op, 648 Vector<Integer> v); 649 @ForceInline 650 final 651 IntVector lanewiseTemplate(VectorOperators.Binary op, 652 Vector<Integer> v) { 653 IntVector that = (IntVector) v; 654 that.check(this); 655 if (opKind(op, VO_SPECIAL | VO_SHIFT)) { 656 if (op == FIRST_NONZERO) { 657 // FIXME: Support this in the JIT. 658 VectorMask<Integer> thisNZ 659 = this.viewAsIntegralLanes().compare(NE, (int) 0); 660 that = that.blend((int) 0, thisNZ.cast(vspecies())); 661 op = OR_UNCHECKED; 662 } 663 if (opKind(op, VO_SHIFT)) { 664 // As per shift specification for Java, mask the shift count. 665 // This allows the JIT to ignore some ISA details. 666 that = that.lanewise(AND, SHIFT_MASK); 667 } 668 if (op == ROR || op == ROL) { // FIXME: JIT should do this 669 IntVector neg = that.lanewise(NEG); 670 IntVector hi = this.lanewise(LSHL, (op == ROR) ? 
neg : that); 671 IntVector lo = this.lanewise(LSHR, (op == ROR) ? that : neg); 672 return hi.lanewise(OR, lo); 673 } else if (op == ANDC2) { 674 // FIXME: Support this in the JIT. 675 that = that.lanewise(NOT); 676 op = AND; 677 } else if (op == DIV) { 678 VectorMask<Integer> eqz = that.eq((int)0); 679 if (eqz.anyTrue()) { 680 throw that.divZeroException(); 681 } 682 } 683 } 684 int opc = opCode(op); 685 return VectorIntrinsics.binaryOp( 686 opc, getClass(), int.class, length(), 687 this, that, 688 BIN_IMPL.find(op, opc, (opc_) -> { 689 switch (opc_) { 690 case VECTOR_OP_ADD: return (v0, v1) -> 691 v0.bOp(v1, (i, a, b) -> (int)(a + b)); 692 case VECTOR_OP_SUB: return (v0, v1) -> 693 v0.bOp(v1, (i, a, b) -> (int)(a - b)); 694 case VECTOR_OP_MUL: return (v0, v1) -> 695 v0.bOp(v1, (i, a, b) -> (int)(a * b)); 696 case VECTOR_OP_DIV: return (v0, v1) -> 697 v0.bOp(v1, (i, a, b) -> (int)(a / b)); 698 case VECTOR_OP_MAX: return (v0, v1) -> 699 v0.bOp(v1, (i, a, b) -> (int)Math.max(a, b)); 700 case VECTOR_OP_MIN: return (v0, v1) -> 701 v0.bOp(v1, (i, a, b) -> (int)Math.min(a, b)); 702 case VECTOR_OP_FIRST_NONZERO: return (v0, v1) -> 703 v0.bOp(v1, (i, a, b) -> toBits(a) != 0 ? 
a : b); 704 case VECTOR_OP_AND: return (v0, v1) -> 705 v0.bOp(v1, (i, a, b) -> (int)(a & b)); 706 case VECTOR_OP_OR: return (v0, v1) -> 707 v0.bOp(v1, (i, a, b) -> (int)(a | b)); 708 case VECTOR_OP_ANDC2: return (v0, v1) -> 709 v0.bOp(v1, (i, a, b) -> (int)(a & ~b)); 710 case VECTOR_OP_XOR: return (v0, v1) -> 711 v0.bOp(v1, (i, a, b) -> (int)(a ^ b)); 712 case VECTOR_OP_LSHIFT: return (v0, v1) -> 713 v0.bOp(v1, (i, a, n) -> (int)(a << n)); 714 case VECTOR_OP_RSHIFT: return (v0, v1) -> 715 v0.bOp(v1, (i, a, n) -> (int)(a >> n)); 716 case VECTOR_OP_URSHIFT: return (v0, v1) -> 717 v0.bOp(v1, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n)); 718 case VECTOR_OP_LROTATE: return (v0, v1) -> 719 v0.bOp(v1, (i, a, n) -> (int)((a << n)|(a >> -n))); 720 case VECTOR_OP_RROTATE: return (v0, v1) -> 721 v0.bOp(v1, (i, a, n) -> (int)((a >> n)|(a << -n))); 722 default: return null; 723 }})); 724 } 725 private static final 726 ImplCache<Binary,BinaryOperator<IntVector>> BIN_IMPL 727 = new ImplCache<>(Binary.class, IntVector.class); 728 729 /** 730 * {@inheritDoc} <!--workaround--> 731 * @see #lanewise(VectorOperators.Binary,int,VectorMask) 732 */ 733 @ForceInline 734 public final 735 IntVector lanewise(VectorOperators.Binary op, 736 Vector<Integer> v, 737 VectorMask<Integer> m) { 738 IntVector that = (IntVector) v; 739 if (op == DIV) { 740 // suppress div/0 exceptions in unset lanes 741 that = that.lanewise(NOT, that.eq((int)0)); 742 return blend(lanewise(DIV, that), m); 743 } 744 return blend(lanewise(op, v), m); 745 } 746 // FIXME: Maybe all of the public final methods in this file (the 747 // simple ones that just call lanewise) should be pushed down to 748 // the X-VectorBits template. They can't optimize properly at 749 // this level, and must rely on inlining. Does it work? 750 // (If it works, of course keep the code here.) 751 752 /** 753 * Combines the lane values of this vector 754 * with the value of a broadcast scalar. 
755 * 756 * This is a lane-wise binary operation which applies 757 * the selected operation to each lane. 758 * The return value will be equal to this expression: 759 * {@code this.lanewise(op, this.broadcast(e))}. 760 * 761 * @param e the input scalar 762 * @return the result of applying the operation lane-wise 763 * to the two input vectors 764 * @throws UnsupportedOperationException if this vector does 765 * not support the requested operation 766 * @see #lanewise(VectorOperators.Binary,Vector) 767 * @see #lanewise(VectorOperators.Binary,int,VectorMask) 768 */ 769 @ForceInline 770 public final 771 IntVector lanewise(VectorOperators.Binary op, 772 int e) { 773 int opc = opCode(op); 774 if (opKind(op, VO_SHIFT) && (int)(int)e == e) { 775 return lanewiseShift(op, (int) e); 776 } 777 if (op == ANDC2) { 778 op = AND; e = (int) ~e; 779 } 780 return lanewise(op, broadcast(e)); 781 } 782 783 /** 784 * Combines the lane values of this vector 785 * with the value of a broadcast scalar, 786 * with selection of lane elements controlled by a mask. 787 * 788 * This is a masked lane-wise binary operation which applies 789 * the selected operation to each lane. 790 * The return value will be equal to this expression: 791 * {@code this.lanewise(op, this.broadcast(e), m)}. 
792 * 793 * @param e the input scalar 794 * @param m the mask controlling lane selection 795 * @return the result of applying the operation lane-wise 796 * to the input vector and the scalar 797 * @throws UnsupportedOperationException if this vector does 798 * not support the requested operation 799 * @see #lanewise(VectorOperators.Binary,Vector,VectorMask) 800 * @see #lanewise(VectorOperators.Binary,int) 801 */ 802 @ForceInline 803 public final 804 IntVector lanewise(VectorOperators.Binary op, 805 int e, 806 VectorMask<Integer> m) { 807 return blend(lanewise(op, e), m); 808 } 809 810 /** 811 * {@inheritDoc} <!--workaround--> 812 * @apiNote 813 * When working with vector subtypes like {@code IntVector}, 814 * {@linkplain #lanewise(VectorOperators.Binary,int) 815 * the more strongly typed method} 816 * is typically selected. It can be explicitly selected 817 * using a cast: {@code v.lanewise(op,(int)e)}. 818 * The two expressions will produce numerically identical results. 819 */ 820 @ForceInline 821 public final 822 IntVector lanewise(VectorOperators.Binary op, 823 long e) { 824 int e1 = (int) e; 825 if ((long)e1 != e 826 // allow shift ops to clip down their int parameters 827 && !(opKind(op, VO_SHIFT) && (int)e1 == e) 828 ) { 829 vspecies().checkValue(e); // for exception 830 } 831 return lanewise(op, e1); 832 } 833 834 /** 835 * {@inheritDoc} <!--workaround--> 836 * @apiNote 837 * When working with vector subtypes like {@code IntVector}, 838 * {@linkplain #lanewise(VectorOperators.Binary,int,VectorMask) 839 * the more strongly typed method} 840 * is typically selected. It can be explicitly selected 841 * using a cast: {@code v.lanewise(op,(int)e,m)}. 842 * The two expressions will produce numerically identical results. 
843 */ 844 @ForceInline 845 public final 846 IntVector lanewise(VectorOperators.Binary op, 847 long e, VectorMask<Integer> m) { 848 return blend(lanewise(op, e), m); 849 } 850 851 /*package-private*/ 852 abstract IntVector 853 lanewiseShift(VectorOperators.Binary op, int e); 854 855 /*package-private*/ 856 @ForceInline 857 final IntVector 858 lanewiseShiftTemplate(VectorOperators.Binary op, int e) { 859 // Special handling for these. FIXME: Refactor? 860 int opc = opCode(op); 861 assert(opKind(op, VO_SHIFT)); 862 // As per shift specification for Java, mask the shift count. 863 e &= SHIFT_MASK; 864 if (op == ROR || op == ROL) { // FIXME: JIT should do this 865 IntVector hi = this.lanewise(LSHL, (op == ROR) ? -e : e); 866 IntVector lo = this.lanewise(LSHR, (op == ROR) ? e : -e); 867 return hi.lanewise(OR, lo); 868 } 869 return VectorIntrinsics.broadcastInt( 870 opc, getClass(), int.class, length(), 871 this, e, 872 BIN_INT_IMPL.find(op, opc, (opc_) -> { 873 switch (opc_) { 874 case VECTOR_OP_LSHIFT: return (v, n) -> 875 v.uOp((i, a) -> (int)(a << n)); 876 case VECTOR_OP_RSHIFT: return (v, n) -> 877 v.uOp((i, a) -> (int)(a >> n)); 878 case VECTOR_OP_URSHIFT: return (v, n) -> 879 v.uOp((i, a) -> (int)((a & LSHR_SETUP_MASK) >>> n)); 880 case VECTOR_OP_LROTATE: return (v, n) -> 881 v.uOp((i, a) -> (int)((a << n)|(a >> -n))); 882 case VECTOR_OP_RROTATE: return (v, n) -> 883 v.uOp((i, a) -> (int)((a >> n)|(a << -n))); 884 default: return null; 885 }})); 886 } 887 private static final 888 ImplCache<Binary,VectorBroadcastIntOp<IntVector>> BIN_INT_IMPL 889 = new ImplCache<>(Binary.class, IntVector.class); 890 891 // As per shift specification for Java, mask the shift count. 892 // We mask 0X3F (long), 0X1F (int), 0x0F (short), 0x7 (byte). 893 // The latter two maskings go beyond the JLS, but seem reasonable 894 // since our lane types are first-class types, not just dressed 895 // up ints. 
896 private static final int SHIFT_MASK = (Integer.SIZE - 1); 897 private static final int LSHR_SETUP_MASK = -1; 898 899 // Ternary lanewise support 900 901 // Ternary operators come in eight variations: 902 // lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2]) 903 // lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2], mask) 904 905 // It is annoying to support all of these variations of masking 906 // and broadcast, but it would be more surprising not to continue 907 // the obvious pattern started by unary and binary. 908 909 /** 910 * {@inheritDoc} <!--workaround--> 911 * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask) 912 * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask) 913 * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask) 914 * @see #lanewise(VectorOperators.Ternary,int,int) 915 * @see #lanewise(VectorOperators.Ternary,Vector,int) 916 * @see #lanewise(VectorOperators.Ternary,int,Vector) 917 */ 918 @Override 919 public abstract 920 IntVector lanewise(VectorOperators.Ternary op, 921 Vector<Integer> v1, 922 Vector<Integer> v2); 923 @ForceInline 924 final 925 IntVector lanewiseTemplate(VectorOperators.Ternary op, 926 Vector<Integer> v1, 927 Vector<Integer> v2) { 928 IntVector that = (IntVector) v1; 929 IntVector tother = (IntVector) v2; 930 // It's a word: https://www.dictionary.com/browse/tother 931 // See also Chapter 11 of Dickens, Our Mutual Friend: 932 // "Totherest Governor," replied Mr Riderhood... 933 that.check(this); 934 tother.check(this); 935 if (op == BITWISE_BLEND) { 936 // FIXME: Support this in the JIT. 
937 that = this.lanewise(XOR, that).lanewise(AND, tother); 938 return this.lanewise(XOR, that); 939 } 940 int opc = opCode(op); 941 return VectorIntrinsics.ternaryOp( 942 opc, getClass(), int.class, length(), 943 this, that, tother, 944 TERN_IMPL.find(op, opc, (opc_) -> { 945 switch (opc_) { 946 case VECTOR_OP_BITWISE_BLEND: return (v0, v1_, v2_) -> 947 v0.tOp(v1_, v2_, (i, a, b, c) -> (int)(a^((a^b)&c))); 948 default: return null; 949 }})); 950 } 951 private static final 952 ImplCache<Ternary,TernaryOperation<IntVector>> TERN_IMPL 953 = new ImplCache<>(Ternary.class, IntVector.class); 954 955 /** 956 * {@inheritDoc} <!--workaround--> 957 * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask) 958 * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask) 959 * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask) 960 */ 961 @ForceInline 962 public final 963 IntVector lanewise(VectorOperators.Ternary op, 964 Vector<Integer> v1, 965 Vector<Integer> v2, 966 VectorMask<Integer> m) { 967 return blend(lanewise(op, v1, v2), m); 968 } 969 970 /** 971 * Combines the lane values of this vector 972 * with the values of two broadcast scalars. 973 * 974 * This is a lane-wise ternary operation which applies 975 * the selected operation to each lane. 976 * The return value will be equal to this expression: 977 * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2))}. 
978 * 979 * @param e1 the first input scalar 980 * @param e2 the second input scalar 981 * @return the result of applying the operation lane-wise 982 * to the input vector and the scalars 983 * @throws UnsupportedOperationException if this vector does 984 * not support the requested operation 985 * @see #lanewise(VectorOperators.Ternary,Vector,Vector) 986 * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask) 987 */ 988 @ForceInline 989 public final 990 IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2) 991 int e1, 992 int e2) { 993 return lanewise(op, broadcast(e1), broadcast(e1)); 994 } 995 996 /** 997 * Combines the lane values of this vector 998 * with the values of two broadcast scalars, 999 * with selection of lane elements controlled by a mask. 1000 * 1001 * This is a masked lane-wise ternary operation which applies 1002 * the selected operation to each lane. 1003 * The return value will be equal to this expression: 1004 * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2), m)}. 1005 * 1006 * @param e1 the first input scalar 1007 * @param e2 the second input scalar 1008 * @param m the mask controlling lane selection 1009 * @return the result of applying the operation lane-wise 1010 * to the input vector and the scalars 1011 * @throws UnsupportedOperationException if this vector does 1012 * not support the requested operation 1013 * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask) 1014 * @see #lanewise(VectorOperators.Ternary,int,int) 1015 */ 1016 @ForceInline 1017 public final 1018 IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2,m) 1019 int e1, 1020 int e2, 1021 VectorMask<Integer> m) { 1022 return blend(lanewise(op, e1, e2), m); 1023 } 1024 1025 /** 1026 * Combines the lane values of this vector 1027 * with the values of another vector and a broadcast scalar. 1028 * 1029 * This is a lane-wise ternary operation which applies 1030 * the selected operation to each lane. 
1031 * The return value will be equal to this expression: 1032 * {@code this.lanewise(op, v1, this.broadcast(e2))}. 1033 * 1034 * @param v1 the other input vector 1035 * @param e2 the input scalar 1036 * @return the result of applying the operation lane-wise 1037 * to the input vectors and the scalar 1038 * @throws UnsupportedOperationException if this vector does 1039 * not support the requested operation 1040 * @see #lanewise(VectorOperators.Ternary,int,int) 1041 * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask) 1042 */ 1043 @ForceInline 1044 public final 1045 IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2) 1046 Vector<Integer> v1, 1047 int e2) { 1048 return lanewise(op, v1, broadcast(e2)); 1049 } 1050 1051 /** 1052 * Combines the lane values of this vector 1053 * with the values of another vector and a broadcast scalar, 1054 * with selection of lane elements controlled by a mask. 1055 * 1056 * This is a masked lane-wise ternary operation which applies 1057 * the selected operation to each lane. 1058 * The return value will be equal to this expression: 1059 * {@code this.lanewise(op, v1, this.broadcast(e2), m)}. 
1060 * 1061 * @param v1 the other input vector 1062 * @param e2 the input scalar 1063 * @param m the mask controlling lane selection 1064 * @return the result of applying the operation lane-wise 1065 * to the input vectors and the scalar 1066 * @throws UnsupportedOperationException if this vector does 1067 * not support the requested operation 1068 * @see #lanewise(VectorOperators.Ternary,Vector,Vector) 1069 * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask) 1070 * @see #lanewise(VectorOperators.Ternary,Vector,int) 1071 */ 1072 @ForceInline 1073 public final 1074 IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2,m) 1075 Vector<Integer> v1, 1076 int e2, 1077 VectorMask<Integer> m) { 1078 return blend(lanewise(op, v1, e2), m); 1079 } 1080 1081 /** 1082 * Combines the lane values of this vector 1083 * with the values of another vector and a broadcast scalar. 1084 * 1085 * This is a lane-wise ternary operation which applies 1086 * the selected operation to each lane. 1087 * The return value will be equal to this expression: 1088 * {@code this.lanewise(op, this.broadcast(e1), v2)}. 1089 * 1090 * @param e1 the input scalar 1091 * @param v2 the other input vector 1092 * @return the result of applying the operation lane-wise 1093 * to the input vectors and the scalar 1094 * @throws UnsupportedOperationException if this vector does 1095 * not support the requested operation 1096 * @see #lanewise(VectorOperators.Ternary,Vector,Vector) 1097 * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask) 1098 */ 1099 @ForceInline 1100 public final 1101 IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2) 1102 int e1, 1103 Vector<Integer> v2) { 1104 return lanewise(op, broadcast(e1), v2); 1105 } 1106 1107 /** 1108 * Combines the lane values of this vector 1109 * with the values of another vector and a broadcast scalar, 1110 * with selection of lane elements controlled by a mask. 
1111 * 1112 * This is a masked lane-wise ternary operation which applies 1113 * the selected operation to each lane. 1114 * The return value will be equal to this expression: 1115 * {@code this.lanewise(op, this.broadcast(e1), v2, m)}. 1116 * 1117 * @param e1 the input scalar 1118 * @param v2 the other input vector 1119 * @param m the mask controlling lane selection 1120 * @return the result of applying the operation lane-wise 1121 * to the input vectors and the scalar 1122 * @throws UnsupportedOperationException if this vector does 1123 * not support the requested operation 1124 * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask) 1125 * @see #lanewise(VectorOperators.Ternary,int,Vector) 1126 */ 1127 @ForceInline 1128 public final 1129 IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2,m) 1130 int e1, 1131 Vector<Integer> v2, 1132 VectorMask<Integer> m) { 1133 return blend(lanewise(op, e1, v2), m); 1134 } 1135 1136 // (Thus endeth the Great and Mighty Ternary Ogdoad.) 1137 // https://en.wikipedia.org/wiki/Ogdoad 1138 1139 /// FULL-SERVICE BINARY METHODS: ADD, SUB, MUL, DIV 1140 // 1141 // These include masked and non-masked versions. 1142 // This subclass adds broadcast (masked or not). 1143 1144 /** 1145 * {@inheritDoc} <!--workaround--> 1146 * @see #add(int) 1147 */ 1148 @Override 1149 @ForceInline 1150 public final IntVector add(Vector<Integer> v) { 1151 return lanewise(ADD, v); 1152 } 1153 1154 /** 1155 * Adds this vector to the broadcast of an input scalar. 1156 * 1157 * This is a lane-wise binary operation which applies 1158 * the primitive addition operation ({@code +}) to each lane. 1159 * 1160 * This method is also equivalent to the expression 1161 * {@link #lanewise(VectorOperators.Binary,int) 1162 * lanewise}{@code (}{@link VectorOperators#ADD 1163 * ADD}{@code , e)}. 
     *
     * @param e the input scalar
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector)
     * @see #broadcast(int)
     * @see #add(int,VectorMask)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final
    IntVector add(int e) {
        return lanewise(ADD, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #add(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector add(Vector<Integer> v,
                               VectorMask<Integer> m) {
        return lanewise(ADD, v, m);
    }

    /**
     * Adds this vector to the broadcast of an input scalar,
     * selecting lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive addition operation ({@code +}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     * lanewise}{@code (}{@link VectorOperators#ADD
     * ADD}{@code , e, m)}.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #add(int)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector add(int e,
                               VectorMask<Integer> m) {
        return lanewise(ADD, e, m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #sub(int)
     */
    @Override
    @ForceInline
    public final IntVector sub(Vector<Integer> v) {
        return lanewise(SUB, v);
    }

    /**
     * Subtracts an input scalar from this vector.
     *
     * This is a lane-wise binary operation which applies
     * the primitive subtraction operation ({@code -}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     * lanewise}{@code (}{@link VectorOperators#SUB
     * SUB}{@code , e)}.
     *
     * @param e the input scalar
     * @return the result of subtracting the scalar from each lane of this vector
     * @see #sub(Vector)
     * @see #broadcast(int)
     * @see #sub(int,VectorMask)
     * @see VectorOperators#SUB
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector sub(int e) {
        return lanewise(SUB, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #sub(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector sub(Vector<Integer> v,
                               VectorMask<Integer> m) {
        return lanewise(SUB, v, m);
    }

    /**
     * Subtracts an input scalar from this vector
     * under the control of a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive subtraction operation ({@code -}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     * lanewise}{@code (}{@link VectorOperators#SUB
     * SUB}{@code , e, m)}.
1276 * 1277 * @param e the input scalar 1278 * @param m the mask controlling lane selection 1279 * @return the result of subtracting the scalar from each lane of this vector 1280 * @see #sub(Vector,VectorMask) 1281 * @see #broadcast(int) 1282 * @see #sub(int) 1283 * @see VectorOperators#SUB 1284 * @see #lanewise(VectorOperators.Binary,Vector) 1285 * @see #lanewise(VectorOperators.Binary,int) 1286 */ 1287 @ForceInline 1288 public final IntVector sub(int e, 1289 VectorMask<Integer> m) { 1290 return lanewise(SUB, e, m); 1291 } 1292 1293 /** 1294 * {@inheritDoc} <!--workaround--> 1295 * @see #mul(int) 1296 */ 1297 @Override 1298 @ForceInline 1299 public final IntVector mul(Vector<Integer> v) { 1300 return lanewise(MUL, v); 1301 } 1302 1303 /** 1304 * Multiplies this vector by the broadcast of an input scalar. 1305 * 1306 * This is a lane-wise binary operation which applies 1307 * the primitive multiplication operation ({@code *}) to each lane. 1308 * 1309 * This method is also equivalent to the expression 1310 * {@link #lanewise(VectorOperators.Binary,int) 1311 * lanewise}{@code (}{@link VectorOperators#MUL 1312 * MUL}{@code , e)}. 1313 * 1314 * @param e the input scalar 1315 * @return the result of multiplying this vector by the given scalar 1316 * @see #mul(Vector) 1317 * @see #broadcast(int) 1318 * @see #mul(int,VectorMask) 1319 * @see VectorOperators#MUL 1320 * @see #lanewise(VectorOperators.Binary,Vector) 1321 * @see #lanewise(VectorOperators.Binary,int) 1322 */ 1323 @ForceInline 1324 public final IntVector mul(int e) { 1325 return lanewise(MUL, e); 1326 } 1327 1328 /** 1329 * {@inheritDoc} <!--workaround--> 1330 * @see #mul(int,VectorMask) 1331 */ 1332 @Override 1333 @ForceInline 1334 public final IntVector mul(Vector<Integer> v, 1335 VectorMask<Integer> m) { 1336 return lanewise(MUL, v, m); 1337 } 1338 1339 /** 1340 * Multiplies this vector by the broadcast of an input scalar, 1341 * selecting lane elements controlled by a mask. 
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive multiplication operation ({@code *}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     * lanewise}{@code (}{@link VectorOperators#MUL
     * MUL}{@code , e, m)}.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of multiplying each lane of this vector by the scalar
     * @see #mul(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #mul(int)
     * @see VectorOperators#MUL
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector mul(int e,
                               VectorMask<Integer> m) {
        return lanewise(MUL, e, m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #div(int)
     */
    @Override
    @ForceInline
    public final IntVector div(Vector<Integer> v) {
        return lanewise(DIV, v);
    }

    /**
     * Divides this vector by the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive division operation ({@code /}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     * lanewise}{@code (}{@link VectorOperators#DIV
     * DIV}{@code , e)}.
     *
     * <p>
     * If the underlying scalar operator does not support
     * division by zero, but is presented with a zero divisor,
     * an {@code ArithmeticException} will be thrown.
     *
     * @param e the input scalar
     * @return the result of dividing each lane of this vector by the scalar
     * @see #div(Vector)
     * @see #broadcast(int)
     * @see #div(int,VectorMask)
     * @see VectorOperators#DIV
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector div(int e) {
        return lanewise(DIV, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #div(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector div(Vector<Integer> v,
                               VectorMask<Integer> m) {
        return lanewise(DIV, v, m);
    }

    /**
     * Divides this vector by the broadcast of an input scalar,
     * selecting lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive division operation ({@code /}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     * lanewise}{@code (}{@link VectorOperators#DIV
     * DIV}{@code , e, m)}.
     *
     * <p>
     * If the underlying scalar operator does not support
     * division by zero, but is presented with a zero divisor,
     * an {@code ArithmeticException} will be thrown.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of dividing each lane of this vector by the scalar
     * @see #div(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #div(int)
     * @see VectorOperators#DIV
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector div(int e,
                               VectorMask<Integer> m) {
        return lanewise(DIV, e, m);
    }

    /// END OF FULL-SERVICE BINARY METHODS

    /// SECOND-TIER BINARY METHODS
    //
    // There are no masked versions.

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final IntVector min(Vector<Integer> v) {
        return lanewise(MIN, v);
    }

    // FIXME: "broadcast of an input scalar" is really wordy. Reduce?
    /**
     * Computes the smaller of this vector and the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies the
     * operation {@code (a, b) -> a < b ? a : b} to each pair of
     * corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     * lanewise}{@code (}{@link VectorOperators#MIN
     * MIN}{@code , e)}.
     *
     * @param e the input scalar
     * @return the smaller of each lane of this vector and the scalar
     * @see #min(Vector)
     * @see #broadcast(int)
     * @see VectorOperators#MIN
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @ForceInline
    public final IntVector min(int e) {
        return lanewise(MIN, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final IntVector max(Vector<Integer> v) {
        return lanewise(MAX, v);
    }

    /**
     * Computes the larger of this vector and the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies the
     * operation {@code (a, b) -> a > b ? a : b} to each pair of
     * corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     * lanewise}{@code (}{@link VectorOperators#MAX
     * MAX}{@code , e)}.
     *
     * @param e the input scalar
     * @return the larger of each lane of this vector and the scalar
     * @see #max(Vector)
     * @see #broadcast(int)
     * @see VectorOperators#MAX
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @ForceInline
    public final IntVector max(int e) {
        return lanewise(MAX, e);
    }

    // common bitwise operators: and, or, not (with scalar versions)
    /**
     * Computes the bitwise logical conjunction ({@code &})
     * of this vector and a second input vector.
     *
     * This is a lane-wise binary operation which applies
     * the primitive bitwise "and" operation ({@code &})
     * to each pair of corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,Vector)
     * lanewise}{@code (}{@link VectorOperators#AND
     * AND}{@code , v)}.
     *
     * <p>
     * This is not a full-service named operation like
     * {@link #add(Vector) add}.  A masked version of
     * this operation is not directly available
     * but may be obtained via the masked version of
     * {@code lanewise}.
     *
     * @param v a second input vector
     * @return the bitwise {@code &} of this vector and the second input vector
     * @see #and(int)
     * @see #or(Vector)
     * @see #not()
     * @see VectorOperators#AND
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     */
    @ForceInline
    public final IntVector and(Vector<Integer> v) {
        return lanewise(AND, v);
    }

    /**
     * Computes the bitwise logical conjunction ({@code &})
     * of this vector and a scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive bitwise "and" operation ({@code &})
     * to each pair of corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,Vector)
     * lanewise}{@code (}{@link VectorOperators#AND
     * AND}{@code , e)}.
     *
     * @param e an input scalar
     * @return the bitwise {@code &} of this vector and scalar
     * @see #and(Vector)
     * @see VectorOperators#AND
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     */
    @ForceInline
    public final IntVector and(int e) {
        return lanewise(AND, e);
    }

    /**
     * Computes the bitwise logical disjunction ({@code |})
     * of this vector and a second input vector.
     *
     * This is a lane-wise binary operation which applies
     * the primitive bitwise "or" operation ({@code |})
     * to each pair of corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,Vector)
     * lanewise}{@code (}{@link VectorOperators#OR
     * OR}{@code , v)}.
     *
     * <p>
     * This is not a full-service named operation like
     * {@link #add(Vector) add}.  A masked version of
     * this operation is not directly available
     * but may be obtained via the masked version of
     * {@code lanewise}.
     *
     * @param v a second input vector
     * @return the bitwise {@code |} of this vector and the second input vector
     * @see #or(int)
     * @see #and(Vector)
     * @see #not()
     * @see VectorOperators#OR
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     */
    @ForceInline
    public final IntVector or(Vector<Integer> v) {
        return lanewise(OR, v);
    }

    /**
     * Computes the bitwise logical disjunction ({@code |})
     * of this vector and a scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive bitwise "or" operation ({@code |})
     * to each pair of corresponding lane values.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,Vector)
     * lanewise}{@code (}{@link VectorOperators#OR
     * OR}{@code , e)}.
     *
     * @param e an input scalar
     * @return the bitwise {@code |} of this vector and scalar
     * @see #or(Vector)
     * @see VectorOperators#OR
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     */
    @ForceInline
    public final IntVector or(int e) {
        return lanewise(OR, e);
    }



    /// UNARY METHODS

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    IntVector neg() {
        return lanewise(NEG);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    IntVector abs() {
        return lanewise(ABS);
    }

    // not (~)
    /**
     * Computes the bitwise logical complement ({@code ~})
     * of this vector.
     *
     * This is a lane-wise unary operation which applies
     * the primitive bitwise "not" operation ({@code ~})
     * to each lane value.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Unary,Vector)
     * lanewise}{@code (}{@link VectorOperators#NOT
     * NOT}{@code )}.
     *
     * <p>
     * This is not a full-service named operation like
     * {@link #add(Vector) add}.  A masked version of
     * this operation is not directly available
     * but may be obtained via the masked version of
     * {@code lanewise}.
     *
     * @return the bitwise complement {@code ~} of this vector
     * @see #and(Vector)
     * @see VectorOperators#NOT
     * @see #lanewise(VectorOperators.Unary,Vector,VectorMask)
     */
    @ForceInline
    public final IntVector not() {
        return lanewise(NOT);
    }


    /// COMPARISONS

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    VectorMask<Integer> eq(Vector<Integer> v) {
        return compare(EQ, v);
    }

    /**
     * Tests if this vector is equal to an input scalar.
     *
     * This is a lane-wise binary test operation which applies
     * the primitive equals operation ({@code ==}) to each lane.
     * The result is the same as {@code compare(VectorOperators.Comparison.EQ, e)}.
     *
     * @param e the input scalar
     * @return the result mask of testing if this vector
     *         is equal to {@code e}
     * @see #compare(VectorOperators.Comparison,int)
     */
    @ForceInline
    public final
    VectorMask<Integer> eq(int e) {
        return compare(EQ, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    VectorMask<Integer> lt(Vector<Integer> v) {
        return compare(LT, v);
    }

    /**
     * Tests if this vector is less than an input scalar.
1738 * 1739 * This is a lane-wise binary test operation which applies 1740 * the primitive less than operation ({@code <}) to each lane. 1741 * The result is the same as {@code compare(VectorOperators.LT, e)}. 1742 * 1743 * @param e the input scalar 1744 * @return the mask result of testing if this vector 1745 * is less than the input scalar 1746 * @see #compare(VectorOperators.Comparison,int) 1747 */ 1748 @ForceInline 1749 public final 1750 VectorMask<Integer> lt(int e) { 1751 return compare(LT, e); 1752 } 1753 1754 /** 1755 * {@inheritDoc} <!--workaround--> 1756 */ 1757 @Override 1758 public abstract 1759 VectorMask<Integer> compare(VectorOperators.Comparison op, Vector<Integer> v); 1760 1761 /*package-private*/ 1762 @ForceInline 1763 final 1764 <M extends VectorMask<Integer>> 1765 M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v) { 1766 Objects.requireNonNull(v); 1767 IntSpecies vsp = vspecies(); 1768 int opc = opCode(op); 1769 return VectorIntrinsics.compare( 1770 opc, getClass(), maskType, int.class, length(), 1771 this, (IntVector) v, 1772 (cond, v0, v1) -> { 1773 AbstractMask<Integer> m 1774 = v0.bTest(cond, v1, (cond_, i, a, b) 1775 -> compareWithOp(cond, a, b)); 1776 @SuppressWarnings("unchecked") 1777 M m2 = (M) m; 1778 return m2; 1779 }); 1780 } 1781 1782 @ForceInline 1783 private static 1784 boolean compareWithOp(int cond, int a, int b) { 1785 switch (cond) { 1786 case VectorIntrinsics.BT_eq: return a == b; 1787 case VectorIntrinsics.BT_ne: return a != b; 1788 case VectorIntrinsics.BT_lt: return a < b; 1789 case VectorIntrinsics.BT_le: return a <= b; 1790 case VectorIntrinsics.BT_gt: return a > b; 1791 case VectorIntrinsics.BT_ge: return a >= b; 1792 } 1793 throw new AssertionError(); 1794 } 1795 1796 /** 1797 * {@inheritDoc} <!--workaround--> 1798 */ 1799 @Override 1800 @ForceInline 1801 public final 1802 VectorMask<Integer> compare(VectorOperators.Comparison op, 1803 Vector<Integer> v, 1804 VectorMask<Integer> m) { 1805 return 
compare(op, v).and(m); 1806 } 1807 1808 /** 1809 * Tests this vector by comparing it with an input scalar, 1810 * according to the given comparison operation. 1811 * 1812 * This is a lane-wise binary test operation which applies 1813 * the comparison operation to each lane. 1814 * <p> 1815 * The result is the same as 1816 * {@code compare(op, broadcast(species(), e))}. 1817 * That is, the scalar may be regarded as broadcast to 1818 * a vector of the same species, and then compared 1819 * against the original vector, using the selected 1820 * comparison operation. 1821 * 1822 * @param e the input scalar 1823 * @return the mask result of testing lane-wise if this vector 1824 * compares to the input, according to the selected 1825 * comparison operator 1826 * @see IntVector#compare(VectorOperators.Comparison,Vector) 1827 * @see #eq(int) 1828 * @see #lessThan(int) 1829 */ 1830 public abstract 1831 VectorMask<Integer> compare(Comparison op, int e); 1832 1833 /*package-private*/ 1834 @ForceInline 1835 final 1836 <M extends VectorMask<Integer>> 1837 M compareTemplate(Class<M> maskType, Comparison op, int e) { 1838 return compareTemplate(maskType, op, broadcast(e)); 1839 } 1840 1841 /** 1842 * Tests this vector by comparing it with an input scalar, 1843 * according to the given comparison operation, 1844 * in lanes selected by a mask. 1845 * 1846 * This is a masked lane-wise binary test operation which applies 1847 * to each pair of corresponding lane values. 1848 * 1849 * The returned result is equal to the expression 1850 * {@code compare(op,s).and(m)}. 
1851 * 1852 * @param e the input scalar 1853 * @param m the mask controlling lane selection 1854 * @return the mask result of testing lane-wise if this vector 1855 * compares to the input, according to the selected 1856 * comparison operator, 1857 * and only in the lanes selected by the mask 1858 * @see IntVector#compare(VectorOperators.Comparison,Vector,VectorMask) 1859 */ 1860 @ForceInline 1861 public final VectorMask<Integer> compare(VectorOperators.Comparison op, 1862 int e, 1863 VectorMask<Integer> m) { 1864 return compare(op, e).and(m); 1865 } 1866 1867 /** 1868 * {@inheritDoc} <!--workaround--> 1869 */ 1870 @Override 1871 public abstract 1872 VectorMask<Integer> compare(Comparison op, long e); 1873 1874 /*package-private*/ 1875 @ForceInline 1876 final 1877 <M extends VectorMask<Integer>> 1878 M compareTemplate(Class<M> maskType, Comparison op, long e) { 1879 return compareTemplate(maskType, op, broadcast(e)); 1880 } 1881 1882 /** 1883 * {@inheritDoc} <!--workaround--> 1884 */ 1885 @Override 1886 @ForceInline 1887 public final 1888 VectorMask<Integer> compare(Comparison op, long e, VectorMask<Integer> m) { 1889 return compare(op, broadcast(e), m); 1890 } 1891 1892 1893 1894 /** 1895 * {@inheritDoc} <!--workaround--> 1896 */ 1897 @Override public abstract 1898 IntVector blend(Vector<Integer> v, VectorMask<Integer> m); 1899 1900 /*package-private*/ 1901 @ForceInline 1902 final 1903 <M extends VectorMask<Integer>> 1904 IntVector 1905 blendTemplate(Class<M> maskType, IntVector v, M m) { 1906 v.check(this); 1907 return VectorIntrinsics.blend( 1908 getClass(), maskType, int.class, length(), 1909 this, v, m, 1910 (v0, v1, m_) -> v0.bOp(v1, m_, (i, a, b) -> b)); 1911 } 1912 1913 /** 1914 * {@inheritDoc} <!--workaround--> 1915 */ 1916 @Override public abstract IntVector addIndex(int scale); 1917 1918 /*package-private*/ 1919 @ForceInline 1920 final IntVector addIndexTemplate(int scale) { 1921 IntSpecies vsp = vspecies(); 1922 // make sure VLENGTH*scale doesn't 
overflow: 1923 vsp.checkScale(scale); 1924 return VectorIntrinsics.indexVector( 1925 getClass(), int.class, length(), 1926 this, scale, vsp, 1927 (v, scale_, s) 1928 -> { 1929 // If the platform doesn't support an INDEX 1930 // instruction directly, load IOTA from memory 1931 // and multiply. 1932 IntVector iota = s.iota(); 1933 int sc = (int) scale_; 1934 return v.add(sc == 1 ? iota : iota.mul(sc)); 1935 }); 1936 } 1937 1938 /** 1939 * Replaces selected lanes of this vector with 1940 * a scalar value 1941 * under the control of a mask. 1942 * 1943 * This is a masked lane-wise binary operation which 1944 * selects each lane value from one or the other input. 1945 * 1946 * The returned result is equal to the expression 1947 * {@code blend(broadcast(e),m)}. 1948 * 1949 * @param e the input scalar, containing the replacement lane value 1950 * @param m the mask controlling lane selection of the scalar 1951 * @return the result of blending the lane elements of this vector with 1952 * the scalar value 1953 */ 1954 @ForceInline 1955 public final IntVector blend(int e, 1956 VectorMask<Integer> m) { 1957 return blend(broadcast(e), m); 1958 } 1959 1960 /** 1961 * Replaces selected lanes of this vector with 1962 * a scalar value 1963 * under the control of a mask. 1964 * 1965 * This is a masked lane-wise binary operation which 1966 * selects each lane value from one or the other input. 1967 * 1968 * The returned result is equal to the expression 1969 * {@code blend(broadcast(e),m)}. 
1970 * 1971 * @param e the input scalar, containing the replacement lane value 1972 * @param m the mask controlling lane selection of the scalar 1973 * @return the result of blending the lane elements of this vector with 1974 * the scalar value 1975 */ 1976 @ForceInline 1977 public final IntVector blend(long e, 1978 VectorMask<Integer> m) { 1979 return blend(broadcast(e), m); 1980 } 1981 1982 /** 1983 * {@inheritDoc} <!--workaround--> 1984 */ 1985 @Override 1986 public abstract 1987 IntVector slice(int origin, Vector<Integer> v1); 1988 1989 /*package-private*/ 1990 final 1991 @ForceInline 1992 IntVector sliceTemplate(int origin, Vector<Integer> v1) { 1993 IntVector that = (IntVector) v1; 1994 that.check(this); 1995 int[] a0 = this.getElements(); 1996 int[] a1 = that.getElements(); 1997 int[] res = new int[a0.length]; 1998 int vlen = res.length; 1999 int firstPart = vlen - origin; 2000 System.arraycopy(a0, origin, res, 0, firstPart); 2001 System.arraycopy(a1, 0, res, firstPart, origin); 2002 return vectorFactory(res); 2003 } 2004 2005 /** 2006 * {@inheritDoc} <!--workaround--> 2007 */ 2008 @Override 2009 @ForceInline 2010 public final 2011 IntVector slice(int origin, 2012 Vector<Integer> w, 2013 VectorMask<Integer> m) { 2014 return broadcast(0).blend(slice(origin, w), m); 2015 } 2016 2017 /** 2018 * {@inheritDoc} <!--workaround--> 2019 */ 2020 @Override 2021 public abstract 2022 IntVector slice(int origin); 2023 2024 /** 2025 * {@inheritDoc} <!--workaround--> 2026 */ 2027 @Override 2028 public abstract 2029 IntVector unslice(int origin, Vector<Integer> w, int part); 2030 2031 /*package-private*/ 2032 final 2033 @ForceInline 2034 IntVector 2035 unsliceTemplate(int origin, Vector<Integer> w, int part) { 2036 IntVector that = (IntVector) w; 2037 that.check(this); 2038 int[] slice = this.getElements(); 2039 int[] res = that.getElements(); 2040 int vlen = res.length; 2041 int firstPart = vlen - origin; 2042 switch (part) { 2043 case 0: 2044 System.arraycopy(slice, 0, 
res, origin, firstPart); 2045 break; 2046 case 1: 2047 System.arraycopy(slice, firstPart, res, 0, origin); 2048 break; 2049 default: 2050 throw wrongPartForSlice(part); 2051 } 2052 return vectorFactory(res); 2053 } 2054 2055 /*package-private*/ 2056 final 2057 @ForceInline 2058 <M extends VectorMask<Integer>> 2059 IntVector 2060 unsliceTemplate(Class<M> maskType, int origin, Vector<Integer> w, int part, M m) { 2061 IntVector that = (IntVector) w; 2062 that.check(this); 2063 IntVector slice = that.sliceTemplate(origin, that); 2064 slice = slice.blendTemplate(maskType, this, m); 2065 return slice.unsliceTemplate(origin, w, part); 2066 } 2067 2068 /** 2069 * {@inheritDoc} <!--workaround--> 2070 */ 2071 @Override 2072 public abstract 2073 IntVector unslice(int origin, Vector<Integer> w, int part, VectorMask<Integer> m); 2074 2075 /** 2076 * {@inheritDoc} <!--workaround--> 2077 */ 2078 @Override 2079 public abstract 2080 IntVector unslice(int origin); 2081 2082 private ArrayIndexOutOfBoundsException 2083 wrongPartForSlice(int part) { 2084 String msg = String.format("bad part number %d for slice operation", 2085 part); 2086 return new ArrayIndexOutOfBoundsException(msg); 2087 } 2088 2089 /** 2090 * {@inheritDoc} <!--workaround--> 2091 */ 2092 @Override 2093 public abstract 2094 IntVector rearrange(VectorShuffle<Integer> m); 2095 2096 /*package-private*/ 2097 @ForceInline 2098 final 2099 <S extends VectorShuffle<Integer>> 2100 IntVector rearrangeTemplate(Class<S> shuffletype, S shuffle) { 2101 shuffle.checkIndexes(); 2102 return VectorIntrinsics.rearrangeOp( 2103 getClass(), shuffletype, int.class, length(), 2104 this, shuffle, 2105 (v1, s_) -> v1.uOp((i, a) -> { 2106 int ei = s_.laneSource(i); 2107 return v1.lane(ei); 2108 })); 2109 } 2110 2111 /** 2112 * {@inheritDoc} <!--workaround--> 2113 */ 2114 @Override 2115 public abstract 2116 IntVector rearrange(VectorShuffle<Integer> s, 2117 VectorMask<Integer> m); 2118 2119 /*package-private*/ 2120 @ForceInline 2121 final 2122 
<S extends VectorShuffle<Integer>> 2123 IntVector rearrangeTemplate(Class<S> shuffletype, 2124 S shuffle, 2125 VectorMask<Integer> m) { 2126 IntVector unmasked = 2127 VectorIntrinsics.rearrangeOp( 2128 getClass(), shuffletype, int.class, length(), 2129 this, shuffle, 2130 (v1, s_) -> v1.uOp((i, a) -> { 2131 int ei = s_.laneSource(i); 2132 return ei < 0 ? 0 : v1.lane(ei); 2133 })); 2134 VectorMask<Integer> valid = shuffle.laneIsValid(); 2135 if (m.andNot(valid).anyTrue()) { 2136 shuffle.checkIndexes(); 2137 throw new AssertionError(); 2138 } 2139 return broadcast((int)0).blend(unmasked, valid); 2140 } 2141 2142 /** 2143 * {@inheritDoc} <!--workaround--> 2144 */ 2145 @Override 2146 public abstract 2147 IntVector rearrange(VectorShuffle<Integer> s, 2148 Vector<Integer> v); 2149 2150 /*package-private*/ 2151 @ForceInline 2152 final 2153 <S extends VectorShuffle<Integer>> 2154 IntVector rearrangeTemplate(Class<S> shuffletype, 2155 S shuffle, 2156 IntVector v) { 2157 VectorMask<Integer> valid = shuffle.laneIsValid(); 2158 VectorShuffle<Integer> ws = shuffle.wrapIndexes(); 2159 IntVector r1 = 2160 VectorIntrinsics.rearrangeOp( 2161 getClass(), shuffletype, int.class, length(), 2162 this, shuffle, 2163 (v1, s_) -> v1.uOp((i, a) -> { 2164 int ei = s_.laneSource(i); 2165 return v1.lane(ei); 2166 })); 2167 IntVector r2 = 2168 VectorIntrinsics.rearrangeOp( 2169 getClass(), shuffletype, int.class, length(), 2170 v, shuffle, 2171 (v1, s_) -> v1.uOp((i, a) -> { 2172 int ei = s_.laneSource(i); 2173 return v1.lane(ei); 2174 })); 2175 return r2.blend(r1, valid); 2176 } 2177 2178 /** 2179 * {@inheritDoc} <!--workaround--> 2180 */ 2181 @Override 2182 public abstract 2183 IntVector selectFrom(Vector<Integer> v); 2184 2185 /*package-private*/ 2186 @ForceInline 2187 final IntVector selectFromTemplate(IntVector v) { 2188 return v.rearrange(this.toShuffle()); 2189 } 2190 2191 /** 2192 * {@inheritDoc} <!--workaround--> 2193 */ 2194 @Override 2195 public abstract 2196 IntVector 
selectFrom(Vector<Integer> s, VectorMask<Integer> m); 2197 2198 /*package-private*/ 2199 @ForceInline 2200 final IntVector selectFromTemplate(IntVector v, 2201 AbstractMask<Integer> m) { 2202 return v.rearrange(this.toShuffle(), m); 2203 } 2204 2205 /// Ternary operations 2206 2207 /** 2208 * Blends together the bits of two vectors under 2209 * the control of a third, which supplies mask bits. 2210 * 2211 * 2212 * This is a lane-wise ternary operation which performs 2213 * a bitwise blending operation {@code (a&~c)|(b&c)} 2214 * to each lane. 2215 * 2216 * This method is also equivalent to the expression 2217 * {@link #lanewise(VectorOperators.Ternary,Vector,Vector) 2218 * lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND 2219 * BITWISE_BLEND}{@code , bits, mask)}. 2220 * 2221 * @param bits input bits to blend into the current vector 2222 * @param mask a bitwise mask to enable blending of the input bits 2223 * @return the bitwise blend of the given bits into the current vector, 2224 * under control of the bitwise mask 2225 * @see #bitwiseBlend(int,int) 2226 * @see #bitwiseBlend(int,Vector) 2227 * @see #bitwiseBlend(Vector,int) 2228 * @see VectorOperators#BITWISE_BLEND 2229 * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask) 2230 */ 2231 @ForceInline 2232 public final 2233 IntVector bitwiseBlend(Vector<Integer> bits, Vector<Integer> mask) { 2234 return lanewise(BITWISE_BLEND, bits, mask); 2235 } 2236 2237 /** 2238 * Blends together the bits of a vector and a scalar under 2239 * the control of another scalar, which supplies mask bits. 2240 * 2241 * 2242 * This is a lane-wise ternary operation which performs 2243 * a bitwise blending operation {@code (a&~c)|(b&c)} 2244 * to each lane. 2245 * 2246 * This method is also equivalent to the expression 2247 * {@link #lanewise(VectorOperators.Ternary,Vector,Vector) 2248 * lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND 2249 * BITWISE_BLEND}{@code , bits, mask)}. 
     *
     * @param bits input bits to blend into the current vector
     * @param mask a bitwise mask to enable blending of the input bits
     * @return the bitwise blend of the given bits into the current vector,
     * under control of the bitwise mask
     * @see #bitwiseBlend(Vector,Vector)
     * @see VectorOperators#BITWISE_BLEND
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector bitwiseBlend(int bits, int mask) {
        return lanewise(BITWISE_BLEND, bits, mask);
    }

    /**
     * Blends together the bits of a vector and a scalar under
     * the control of another vector, which supplies mask bits.
     *
     *
     * This is a lane-wise ternary operation which performs
     * a bitwise blending operation {@code (a&~c)|(b&c)}
     * to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
     * lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
     * BITWISE_BLEND}{@code , bits, mask)}.
     *
     * @param bits input bits to blend into the current vector
     * @param mask a bitwise mask to enable blending of the input bits
     * @return the bitwise blend of the given bits into the current vector,
     * under control of the bitwise mask
     * @see #bitwiseBlend(Vector,Vector)
     * @see VectorOperators#BITWISE_BLEND
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     */
    @ForceInline
    public final
    IntVector bitwiseBlend(int bits, Vector<Integer> mask) {
        return lanewise(BITWISE_BLEND, bits, mask);
    }

    /**
     * Blends together the bits of two vectors under
     * the control of a scalar, which supplies mask bits.
     *
     *
     * This is a lane-wise ternary operation which performs
     * a bitwise blending operation {@code (a&~c)|(b&c)}
     * to each lane.
2301 * 2302 * This method is also equivalent to the expression 2303 * {@link #lanewise(VectorOperators.Ternary,Vector,Vector) 2304 * lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND 2305 * BITWISE_BLEND}{@code , bits, mask)}. 2306 * 2307 * @param bits input bits to blend into the current vector 2308 * @param mask a bitwise mask to enable blending of the input bits 2309 * @return the bitwise blend of the given bits into the current vector, 2310 * under control of the bitwise mask 2311 * @see #bitwiseBlend(Vector,Vector) 2312 * @see VectorOperators#BITWISE_BLEND 2313 * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask) 2314 */ 2315 @ForceInline 2316 public final 2317 IntVector bitwiseBlend(Vector<Integer> bits, int mask) { 2318 return lanewise(BITWISE_BLEND, bits, mask); 2319 } 2320 2321 2322 // Type specific horizontal reductions 2323 2324 /** 2325 * Returns a value accumulated from all the lanes of this vector. 2326 * 2327 * This is an associative cross-lane reduction operation which 2328 * applies the specified operation to all the lane elements. 2329 * 2330 * <p> 2331 * A few reduction operations do not support arbitrary reordering 2332 * of their operands, yet are included here because of their 2333 * usefulness. 2334 * 2335 * <ul> 2336 * <li> 2337 * In the case of {@code FIRST_NONZERO}, the reduction returns 2338 * the value from the lowest-numbered non-zero lane. 2339 * 2340 * 2341 * <li> 2342 * In the case of floating point addition and multiplication, the 2343 * precise result will reflect the choice of an arbitrary order 2344 * of operations, which may even vary over time. 2345 * 2346 * <li> 2347 * All other reduction operations are fully commutative and 2348 * associative. The implementation can choose any order of 2349 * processing, yet it will always produce the same result. 
2350 * 2351 * </ul> 2352 * 2353 * 2354 * @param op the operation used to combine lane values 2355 * @return the accumulated result 2356 * @throws UnsupportedOperationException if this vector does 2357 * not support the requested operation 2358 * @see #reduceLanes(VectorOperators.Associative,VectorMask) 2359 * @see #add(Vector) 2360 * @see #mul(Vector) 2361 * @see #min(Vector) 2362 * @see #max(Vector) 2363 * @see #and(Vector) 2364 * @see #or(Vector) 2365 * @see VectorOperators#XOR 2366 * @see VectorOperators#FIRST_NONZERO 2367 */ 2368 public abstract int reduceLanes(VectorOperators.Associative op); 2369 2370 /** 2371 * Returns a value accumulated from selected lanes of this vector, 2372 * controlled by a mask. 2373 * 2374 * This is an associative cross-lane reduction operation which 2375 * applies the specified operation to the selected lane elements. 2376 * <p> 2377 * If no elements are selected, an operation-specific identity 2378 * value is returned. 2379 * <ul> 2380 * <li> 2381 * If the operation is 2382 * {@code ADD}, {@code XOR}, {@code OR}, 2383 * or {@code FIRST_NONZERO}, 2384 * then the identity value is zero, the default {@code int} value. 2385 * <li> 2386 * If the operation is {@code MUL}, 2387 * then the identity value is one. 2388 * <li> 2389 * If the operation is {@code AND}, 2390 * then the identity value is minus one (all bits set). 2391 * <li> 2392 * If the operation is {@code MAX}, 2393 * then the identity value is {@code Integer.MIN_VALUE}. 2394 * <li> 2395 * If the operation is {@code MIN}, 2396 * then the identity value is {@code Integer.MAX_VALUE}. 
     * </ul>
     *
     * @param op the operation used to combine lane values
     * @param m the mask controlling lane selection
     * @return the reduced result accumulated from the selected lane values
     * @throws UnsupportedOperationException if this vector does
     * not support the requested operation
     * @see #reduceLanes(VectorOperators.Associative)
     */
    public abstract int reduceLanes(VectorOperators.Associative op,
                                    VectorMask<Integer> m);

    /*package-private*/
    @ForceInline
    final
    int reduceLanesTemplate(VectorOperators.Associative op,
                            VectorMask<Integer> m) {
        // Masked lanes are replaced with the operation's identity
        // value, so the unmasked reduction below is unaffected by them.
        IntVector v = reduceIdentityVector(op).blend(this, m);
        return v.reduceLanesTemplate(op);
    }

    /*package-private*/
    @ForceInline
    final
    int reduceLanesTemplate(VectorOperators.Associative op) {
        if (op == FIRST_NONZERO) {
            // FIXME: The JIT should handle this, and other scan ops also.
            VectorMask<Integer> thisNZ
                = this.viewAsIntegralLanes().compare(NE, (int) 0);
            return this.lane(thisNZ.firstTrue());
        }
        int opc = opCode(op);
        return fromBits(VectorIntrinsics.reductionCoerced(
            opc, getClass(), int.class, length(),
            this,
            REDUCE_IMPL.find(op, opc, (opc_) -> {
              switch (opc_) {
              case VECTOR_OP_ADD: return v ->
                      toBits(v.rOp((int)0, (i, a, b) -> (int)(a + b)));
              case VECTOR_OP_MUL: return v ->
                      toBits(v.rOp((int)1, (i, a, b) -> (int)(a * b)));
              case VECTOR_OP_MIN: return v ->
                      toBits(v.rOp(MAX_OR_INF, (i, a, b) -> (int) Math.min(a, b)));
              case VECTOR_OP_MAX: return v ->
                      toBits(v.rOp(MIN_OR_INF, (i, a, b) -> (int) Math.max(a, b)));
              case VECTOR_OP_FIRST_NONZERO: return v ->
                      toBits(v.rOp((int)0, (i, a, b) -> toBits(a) != 0 ? a : b));
              case VECTOR_OP_AND: return v ->
                      toBits(v.rOp((int)-1, (i, a, b) -> (int)(a & b)));
              case VECTOR_OP_OR: return v ->
                      toBits(v.rOp((int)0, (i, a, b) -> (int)(a | b)));
              case VECTOR_OP_XOR: return v ->
                      toBits(v.rOp((int)0, (i, a, b) -> (int)(a ^ b)));
              default: return null;
              }})));
    }
    // Cache of scalar fallback lambdas for reductions, keyed by opcode.
    private static final
    ImplCache<Associative,Function<IntVector,Long>> REDUCE_IMPL
        = new ImplCache<>(Associative.class, IntVector.class);

    private
    @ForceInline
    IntVector reduceIdentityVector(VectorOperators.Associative op) {
        int opc = opCode(op);
        UnaryOperator<IntVector> fn
            = REDUCE_ID_IMPL.find(op, opc, (opc_) -> {
                switch (opc_) {
                case VECTOR_OP_ADD:
                case VECTOR_OP_OR:
                case VECTOR_OP_XOR:
                case VECTOR_OP_FIRST_NONZERO:
                    return v -> v.broadcast(0);
                case VECTOR_OP_MUL:
                    return v -> v.broadcast(1);
                case VECTOR_OP_AND:
                    return v -> v.broadcast(-1);
                case VECTOR_OP_MIN:
                    return v -> v.broadcast(MAX_OR_INF);
                case VECTOR_OP_MAX:
                    return v -> v.broadcast(MIN_OR_INF);
                default: return null;
                }
            });
        return fn.apply(this);
    }
    // Cache of identity-vector factories for reductions, keyed by opcode.
    private static final
    ImplCache<Associative,UnaryOperator<IntVector>> REDUCE_ID_IMPL
        = new ImplCache<>(Associative.class, IntVector.class);

    private static final int MIN_OR_INF = Integer.MIN_VALUE;
    private static final int MAX_OR_INF = Integer.MAX_VALUE;

    public @Override abstract long reduceLanesToLong(VectorOperators.Associative op);
    public @Override abstract long reduceLanesToLong(VectorOperators.Associative op,
                                                     VectorMask<Integer> m);

    // Type specific accessors

    /**
     * Gets the lane element at lane index {@code i}
     *
     * @param i the lane index
     * @return the lane element at lane index {@code i}
     * @throws IllegalArgumentException if the index is out of range
     * ({@code < 0 || >= length()})
     */
    public abstract int lane(int i);

    /**
     * Replaces the lane element of this vector at lane index {@code i} with
     * value {@code e}.
     *
     * This is a cross-lane operation and behaves as if it returns the result
     * of blending this vector with an input vector that is the result of
     * broadcasting {@code e} and a mask that has only one lane set at lane
     * index {@code i}.
     *
     * @param i the lane index of the lane element to be replaced
     * @param e the value to be placed
     * @return the result of replacing the lane element of this vector at lane
     * index {@code i} with value {@code e}.
     * @throws IllegalArgumentException if the index is out of range
     * ({@code < 0 || >= length()})
     */
    public abstract IntVector withLane(int i, int e);

    // Memory load operations

    /**
     * Returns an array of type {@code int[]}
     * containing all the lane values.
     * The array length is the same as the vector length.
     * The array elements are stored in lane order.
     * <p>
     * This method behaves as if it stores
     * this vector into an allocated array
     * (using {@link #intoArray(int[], int) intoArray})
     * and returns the array as follows:
     * <pre>{@code
     * int[] a = new int[this.length()];
     * this.intoArray(a, 0);
     * return a;
     * }</pre>
     *
     * @return an array containing the lane values of this vector
     */
    @ForceInline
    @Override
    public final int[] toArray() {
        int[] a = new int[vspecies().laneCount()];
        intoArray(a, 0);
        return a;
    }

    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * When this method is used on vectors
     * of type {@code IntVector},
     * there will be no loss of precision or range.
     */
    @ForceInline
    @Override
    public final long[] toLongArray() {
        int[] a = toArray();
        long[] res = new long[a.length];
        for (int i = 0; i < a.length; i++) {
            res[i] = (long) a[i];
        }
        return res;
    }

    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * When this method is used on vectors
     * of type {@code IntVector},
     * there will be no loss of precision.
     */
    @ForceInline
    @Override
    public final double[] toDoubleArray() {
        int[] a = toArray();
        double[] res = new double[a.length];
        for (int i = 0; i < a.length; i++) {
            res[i] = (double) a[i];
        }
        return res;
    }

    /**
     * Loads a vector from a byte array starting at an offset.
     * Bytes are composed into primitive lane elements according
     * to {@linkplain ByteOrder#LITTLE_ENDIAN little endian} ordering.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
2591 * <p> 2592 * This method behaves as if it returns the result of calling 2593 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2594 * fromByteBuffer()} as follows: 2595 * <pre>{@code 2596 * var bb = ByteBuffer.wrap(a); 2597 * var bo = ByteOrder.LITTLE_ENDIAN; 2598 * var m = species.maskAll(true); 2599 * return fromByteBuffer(species, bb, offset, m, bo); 2600 * }</pre> 2601 * 2602 * @param species species of desired vector 2603 * @param a the byte array 2604 * @param offset the offset into the array 2605 * @return a vector loaded from a byte array 2606 * @throws IndexOutOfBoundsException 2607 * if {@code offset+N*ESIZE < 0} 2608 * or {@code offset+(N+1)*ESIZE > a.length} 2609 * for any lane {@code N} in the vector 2610 */ 2611 @ForceInline 2612 public static 2613 IntVector fromByteArray(VectorSpecies<Integer> species, 2614 byte[] a, int offset) { 2615 return fromByteArray(species, a, offset, ByteOrder.LITTLE_ENDIAN); 2616 } 2617 2618 /** 2619 * Loads a vector from a byte array starting at an offset. 2620 * Bytes are composed into primitive lane elements according 2621 * to the specified byte order. 2622 * The vector is arranged into lanes according to 2623 * <a href="Vector.html#lane-order">memory ordering</a>. 
2624 * <p> 2625 * This method behaves as if it returns the result of calling 2626 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2627 * fromByteBuffer()} as follows: 2628 * <pre>{@code 2629 * var bb = ByteBuffer.wrap(a); 2630 * var m = species.maskAll(true); 2631 * return fromByteBuffer(species, bb, offset, m, bo); 2632 * }</pre> 2633 * 2634 * @param species species of desired vector 2635 * @param a the byte array 2636 * @param offset the offset into the array 2637 * @param bo the intended byte order 2638 * @return a vector loaded from a byte array 2639 * @throws IndexOutOfBoundsException 2640 * if {@code offset+N*ESIZE < 0} 2641 * or {@code offset+(N+1)*ESIZE > a.length} 2642 * for any lane {@code N} in the vector 2643 */ 2644 @ForceInline 2645 public static 2646 IntVector fromByteArray(VectorSpecies<Integer> species, 2647 byte[] a, int offset, 2648 ByteOrder bo) { 2649 IntSpecies vsp = (IntSpecies) species; 2650 offset = checkFromIndexSize(offset, 2651 vsp.vectorBitSize() / Byte.SIZE, 2652 a.length); 2653 return vsp.dummyVector() 2654 .fromByteArray0(a, offset).maybeSwap(bo); 2655 } 2656 2657 /** 2658 * Loads a vector from a byte array starting at an offset 2659 * and using a mask. 2660 * Lanes where the mask is unset are filled with the default 2661 * value of {@code int} (zero). 2662 * Bytes are composed into primitive lane elements according 2663 * to {@linkplain ByteOrder#LITTLE_ENDIAN little endian} ordering. 2664 * The vector is arranged into lanes according to 2665 * <a href="Vector.html#lane-order">memory ordering</a>. 
2666 * <p> 2667 * This method behaves as if it returns the result of calling 2668 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2669 * fromByteBuffer()} as follows: 2670 * <pre>{@code 2671 * var bb = ByteBuffer.wrap(a); 2672 * var bo = ByteOrder.LITTLE_ENDIAN; 2673 * return fromByteBuffer(species, bb, offset, bo, m); 2674 * }</pre> 2675 * 2676 * @param species species of desired vector 2677 * @param a the byte array 2678 * @param offset the offset into the array 2679 * @param m the mask controlling lane selection 2680 * @return a vector loaded from a byte array 2681 * @throws IndexOutOfBoundsException 2682 * if {@code offset+N*ESIZE < 0} 2683 * or {@code offset+(N+1)*ESIZE > a.length} 2684 * for any lane {@code N} in the vector 2685 */ 2686 @ForceInline 2687 public static 2688 IntVector fromByteArray(VectorSpecies<Integer> species, 2689 byte[] a, int offset, 2690 VectorMask<Integer> m) { 2691 return fromByteArray(species, a, offset, ByteOrder.LITTLE_ENDIAN, m); 2692 } 2693 2694 /** 2695 * Loads a vector from a byte array starting at an offset 2696 * and using a mask. 2697 * Lanes where the mask is unset are filled with the default 2698 * value of {@code int} (zero). 2699 * Bytes are composed into primitive lane elements according 2700 * to {@linkplain ByteOrder#LITTLE_ENDIAN little endian} ordering. 2701 * The vector is arranged into lanes according to 2702 * <a href="Vector.html#lane-order">memory ordering</a>. 
2703 * <p> 2704 * This method behaves as if it returns the result of calling 2705 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2706 * fromByteBuffer()} as follows: 2707 * <pre>{@code 2708 * var bb = ByteBuffer.wrap(a); 2709 * return fromByteBuffer(species, bb, offset, m, bo); 2710 * }</pre> 2711 * 2712 * @param species species of desired vector 2713 * @param a the byte array 2714 * @param offset the offset into the array 2715 * @param bo the intended byte order 2716 * @param m the mask controlling lane selection 2717 * @return a vector loaded from a byte array 2718 * @throws IndexOutOfBoundsException 2719 * if {@code offset+N*ESIZE < 0} 2720 * or {@code offset+(N+1)*ESIZE > a.length} 2721 * for any lane {@code N} in the vector 2722 * where the mask is set 2723 */ 2724 @ForceInline 2725 public static 2726 IntVector fromByteArray(VectorSpecies<Integer> species, 2727 byte[] a, int offset, 2728 ByteOrder bo, 2729 VectorMask<Integer> m) { 2730 IntSpecies vsp = (IntSpecies) species; 2731 IntVector zero = vsp.zero(); 2732 IntVector iota = zero.addIndex(1); 2733 ((AbstractMask<Integer>)m) 2734 .checkIndexByLane(offset, a.length, iota, 4); 2735 IntVector v = zero.fromByteArray0(a, offset); 2736 return zero.blend(v.maybeSwap(bo), m); 2737 } 2738 2739 /** 2740 * Loads a vector from an array of type {@code int[]} 2741 * starting at an offset. 2742 * For each vector lane, where {@code N} is the vector lane index, the 2743 * array element at index {@code offset + N} is placed into the 2744 * resulting vector at lane index {@code N}. 
2745 * 2746 * @param species species of desired vector 2747 * @param a the array 2748 * @param offset the offset into the array 2749 * @return the vector loaded from an array 2750 * @throws IndexOutOfBoundsException 2751 * if {@code offset+N < 0} or {@code offset+N >= a.length} 2752 * for any lane {@code N} in the vector 2753 */ 2754 @ForceInline 2755 public static 2756 IntVector fromArray(VectorSpecies<Integer> species, 2757 int[] a, int offset) { 2758 IntSpecies vsp = (IntSpecies) species; 2759 offset = checkFromIndexSize(offset, 2760 vsp.laneCount(), 2761 a.length); 2762 return vsp.dummyVector().fromArray0(a, offset); 2763 } 2764 2765 /** 2766 * Loads a vector from an array of type {@code int[]} 2767 * starting at an offset and using a mask. 2768 * Lanes where the mask is unset are filled with the default 2769 * value of {@code int} (zero). 2770 * For each vector lane, where {@code N} is the vector lane index, 2771 * if the mask lane at index {@code N} is set then the array element at 2772 * index {@code offset + N} is placed into the resulting vector at lane index 2773 * {@code N}, otherwise the default element value is placed into the 2774 * resulting vector at lane index {@code N}. 
2775 * 2776 * @param species species of desired vector 2777 * @param a the array 2778 * @param offset the offset into the array 2779 * @param m the mask controlling lane selection 2780 * @return the vector loaded from an array 2781 * @throws IndexOutOfBoundsException 2782 * if {@code offset+N < 0} or {@code offset+N >= a.length} 2783 * for any lane {@code N} in the vector 2784 * where the mask is set 2785 */ 2786 @ForceInline 2787 public static 2788 IntVector fromArray(VectorSpecies<Integer> species, 2789 int[] a, int offset, 2790 VectorMask<Integer> m) { 2791 IntSpecies vsp = (IntSpecies) species; 2792 IntVector zero = vsp.zero(); 2793 IntVector iota = vsp.iota(); 2794 ((AbstractMask<Integer>)m) 2795 .checkIndexByLane(offset, a.length, iota, 1); 2796 return zero.blend(zero.fromArray0(a, offset), m); 2797 } 2798 2799 /** 2800 * FIXME: EDIT THIS 2801 * Loads a vector from an array using indexes obtained from an index 2802 * map. 2803 * <p> 2804 * For each vector lane, where {@code N} is the vector lane index, the 2805 * array element at index {@code offset + indexMap[mapOffset + N]} is placed into the 2806 * resulting vector at lane index {@code N}. 
2807 * 2808 * @param species species of desired vector 2809 * @param a the array 2810 * @param offset the offset into the array, may be negative if relative 2811 * indexes in the index map compensate to produce a value within the 2812 * array bounds 2813 * @param indexMap the index map 2814 * @param mapOffset the offset into the index map 2815 * @return the vector loaded from an array 2816 * @throws IndexOutOfBoundsException if {@code mapOffset < 0}, or 2817 * {@code mapOffset > indexMap.length - species.length()}, 2818 * or for any vector lane index {@code N} the result of 2819 * {@code offset + indexMap[mapOffset + N]} is {@code < 0} or {@code >= a.length} 2820 */ 2821 @ForceInline 2822 public static 2823 IntVector fromArray(VectorSpecies<Integer> species, 2824 int[] a, int offset, 2825 int[] indexMap, int mapOffset) { 2826 IntSpecies vsp = (IntSpecies) species; 2827 Objects.requireNonNull(a); 2828 Objects.requireNonNull(indexMap); 2829 Class<? extends IntVector> vectorType = vsp.vectorType(); 2830 2831 2832 // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k] 2833 IntVector vix = IntVector.fromArray(IntVector.species(vsp.indexShape()), indexMap, mapOffset).add(offset); 2834 2835 vix = VectorIntrinsics.checkIndex(vix, a.length); 2836 2837 return VectorIntrinsics.loadWithMap( 2838 vectorType, int.class, vsp.laneCount(), 2839 IntVector.species(vsp.indexShape()).vectorType(), 2840 a, ARRAY_BASE, vix, 2841 a, offset, indexMap, mapOffset, vsp, 2842 (int[] c, int idx, int[] iMap, int idy, IntSpecies s) -> 2843 s.vOp(n -> c[idx + iMap[idy+n]])); 2844 } 2845 2846 /** 2847 * Loads a vector from an array using indexes obtained from an index 2848 * map and using a mask. 
2849 * FIXME: EDIT THIS 2850 * <p> 2851 * For each vector lane, where {@code N} is the vector lane index, 2852 * if the mask lane at index {@code N} is set then the array element at 2853 * index {@code offset + indexMap[mapOffset + N]} is placed into the resulting vector 2854 * at lane index {@code N}. 2855 * 2856 * @param species species of desired vector 2857 * @param a the array 2858 * @param offset the offset into the array, may be negative if relative 2859 * indexes in the index map compensate to produce a value within the 2860 * array bounds 2861 * @param indexMap the index map 2862 * @param mapOffset the offset into the index map 2863 * @param m the mask controlling lane selection 2864 * @return the vector loaded from an array 2865 * @throws IndexOutOfBoundsException if {@code mapOffset < 0}, or 2866 * {@code mapOffset > indexMap.length - species.length()}, 2867 * or for any vector lane index {@code N} where the mask at lane 2868 * {@code N} is set the result of {@code offset + indexMap[mapOffset + N]} is 2869 * {@code < 0} or {@code >= a.length} 2870 */ 2871 public static 2872 IntVector fromArray(VectorSpecies<Integer> species, 2873 int[] a, int offset, 2874 int[] indexMap, int mapOffset, 2875 VectorMask<Integer> m) { 2876 IntSpecies vsp = (IntSpecies) species; 2877 2878 // FIXME This can result in out of bounds errors for unset mask lanes 2879 // FIX = Use a scatter instruction which routes the unwanted lanes 2880 // into a bit-bucket variable (private to implementation). 2881 // This requires a 2-D scatter in order to set a second base address. 2882 // See notes in https://bugs.openjdk.java.net/browse/JDK-8223367 2883 assert(m.allTrue()); 2884 return (IntVector) 2885 zero(species).blend(fromArray(species, a, offset, indexMap, mapOffset), m); 2886 2887 } 2888 2889 /** 2890 * Loads a vector from a {@linkplain ByteBuffer byte buffer} 2891 * starting at an offset into the byte buffer. 
2892 * <p> 2893 * Bytes are composed into primitive lane elements according to 2894 * {@link ByteOrder#LITTLE_ENDIAN little endian} byte order. 2895 * To avoid errors, the 2896 * {@linkplain ByteBuffer#order() intrinsic byte order} 2897 * of the buffer must be little-endian. 2898 * <p> 2899 * This method behaves as if it returns the result of calling 2900 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2901 * fromByteBuffer()} as follows: 2902 * <pre>{@code 2903 * var bb = ByteBuffer.wrap(a); 2904 * var bo = ByteOrder.LITTLE_ENDIAN; 2905 * var m = species.maskAll(true); 2906 * return fromByteBuffer(species, bb, offset, m, bo); 2907 * }</pre> 2908 * 2909 * @param species species of desired vector 2910 * @param bb the byte buffer 2911 * @param offset the offset into the byte buffer 2912 * @return a vector loaded from a byte buffer 2913 * @throws IllegalArgumentException if byte order of bb 2914 * is not {@link ByteOrder#LITTLE_ENDIAN} 2915 * @throws IndexOutOfBoundsException 2916 * if {@code offset+N*4 < 0} 2917 * or {@code offset+N**4 >= bb.limit()} 2918 * for any lane {@code N} in the vector 2919 */ 2920 @ForceInline 2921 public static 2922 IntVector fromByteBuffer(VectorSpecies<Integer> species, 2923 ByteBuffer bb, int offset, 2924 ByteOrder bo) { 2925 IntSpecies vsp = (IntSpecies) species; 2926 offset = checkFromIndexSize(offset, 2927 vsp.laneCount(), 2928 bb.limit()); 2929 return vsp.dummyVector() 2930 .fromByteBuffer0(bb, offset).maybeSwap(bo); 2931 } 2932 2933 /** 2934 * Loads a vector from a {@linkplain ByteBuffer byte buffer} 2935 * starting at an offset into the byte buffer 2936 * and using a mask. 2937 * <p> 2938 * Bytes are composed into primitive lane elements according to 2939 * {@link ByteOrder#LITTLE_ENDIAN little endian} byte order. 2940 * To avoid errors, the 2941 * {@linkplain ByteBuffer#order() intrinsic byte order} 2942 * of the buffer must be little-endian. 
2943 * <p> 2944 * This method behaves as if it returns the result of calling 2945 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask) 2946 * fromByteBuffer()} as follows: 2947 * <pre>{@code 2948 * var bb = ByteBuffer.wrap(a); 2949 * var bo = ByteOrder.LITTLE_ENDIAN; 2950 * var m = species.maskAll(true); 2951 * return fromByteBuffer(species, bb, offset, m, bo); 2952 * }</pre> 2953 * 2954 * @param species species of desired vector 2955 * @param bb the byte buffer 2956 * @param offset the offset into the byte buffer 2957 * @param m the mask controlling lane selection 2958 * @return a vector loaded from a byte buffer 2959 * @throws IllegalArgumentException if byte order of bb 2960 * is not {@link ByteOrder#LITTLE_ENDIAN} 2961 * @throws IndexOutOfBoundsException 2962 * if {@code offset+N*4 < 0} 2963 * or {@code offset+N**4 >= bb.limit()} 2964 * for any lane {@code N} in the vector 2965 * where the mask is set 2966 */ 2967 @ForceInline 2968 public static 2969 IntVector fromByteBuffer(VectorSpecies<Integer> species, 2970 ByteBuffer bb, int offset, 2971 ByteOrder bo, 2972 VectorMask<Integer> m) { 2973 if (m.allTrue()) { 2974 return fromByteBuffer(species, bb, offset, bo); 2975 } 2976 IntSpecies vsp = (IntSpecies) species; 2977 checkMaskFromIndexSize(offset, 2978 vsp, m, 1, 2979 bb.limit()); 2980 IntVector zero = zero(vsp); 2981 IntVector v = zero.fromByteBuffer0(bb, offset); 2982 return zero.blend(v.maybeSwap(bo), m); 2983 } 2984 2985 // Memory store operations 2986 2987 /** 2988 * Stores this vector into an array of type {@code int[]} 2989 * starting at an offset. 2990 * <p> 2991 * For each vector lane, where {@code N} is the vector lane index, 2992 * the lane element at index {@code N} is stored into the array 2993 * element {@code a[offset+N]}. 
2994 * 2995 * @param a the array, of type {@code int[]} 2996 * @param offset the offset into the array 2997 * @throws IndexOutOfBoundsException 2998 * if {@code offset+N < 0} or {@code offset+N >= a.length} 2999 * for any lane {@code N} in the vector 3000 */ 3001 @ForceInline 3002 public final 3003 void intoArray(int[] a, int offset) { 3004 IntSpecies vsp = vspecies(); 3005 offset = checkFromIndexSize(offset, 3006 vsp.laneCount(), 3007 a.length); 3008 VectorIntrinsics.store( 3009 vsp.vectorType(), vsp.elementType(), vsp.laneCount(), 3010 a, arrayAddress(a, offset), 3011 this, 3012 a, offset, 3013 (arr, off, v) 3014 -> v.stOp(arr, off, 3015 (arr_, off_, i, e) -> arr_[off_ + i] = e)); 3016 } 3017 3018 /** 3019 * Stores this vector into an array of {@code int} 3020 * starting at offset and using a mask. 3021 * <p> 3022 * For each vector lane, where {@code N} is the vector lane index, 3023 * the lane element at index {@code N} is stored into the array 3024 * element {@code a[offset+N]}. 3025 * If the mask lane at {@code N} is unset then the corresponding 3026 * array element {@code a[offset+N]} is left unchanged. 3027 * <p> 3028 * Array range checking is done for lanes where the mask is set. 3029 * Lanes where the mask is unset are not stored and do not need 3030 * to correspond to legitimate elements of {@code a}. 3031 * That is, unset lanes may correspond to array indexes less than 3032 * zero or beyond the end of the array. 
3033 * 3034 * @param a the array, of type {@code int[]} 3035 * @param offset the offset into the array 3036 * @param m the mask controlling lane storage 3037 * @throws IndexOutOfBoundsException 3038 * if {@code offset+N < 0} or {@code offset+N >= a.length} 3039 * for any lane {@code N} in the vector 3040 * where the mask is set 3041 */ 3042 @ForceInline 3043 public final 3044 void intoArray(int[] a, int offset, 3045 VectorMask<Integer> m) { 3046 if (m.allTrue()) { 3047 intoArray(a, offset); 3048 } else { 3049 // FIXME: Cannot vectorize yet, if there's a mask. 3050 stOp(a, offset, m, (arr, off, i, v) -> arr[off+i] = v); 3051 } 3052 } 3053 3054 /** 3055 * Stores this vector into an array of type {@code int[]} 3056 * using indexes obtained from an index map 3057 * and using a mask. 3058 * <p> 3059 * For each vector lane, where {@code N} is the vector lane index, 3060 * if the mask lane at index {@code N} is set then 3061 * the lane element at index {@code N} is stored into the array 3062 * element {@code a[f(N)]}, where {@code f(N)} is the 3063 * index mapping expression 3064 * {@code offset + indexMap[mapOffset + N]]}. 3065 * 3066 * @param a the array 3067 * @param offset an offset to combine with the index map offsets 3068 * @param indexMap the index map 3069 * @param mapOffset the offset into the index map 3070 * @param m the mask 3071 * @returns a vector of the values {@code m ? a[f(N)] : 0}, 3072 * {@code f(N) = offset + indexMap[mapOffset + N]]}. 
3073 * @throws IndexOutOfBoundsException 3074 * if {@code mapOffset+N < 0} 3075 * or if {@code mapOffset+N >= indexMap.length}, 3076 * or if {@code f(N)=offset+indexMap[mapOffset+N]} 3077 * is an invalid index into {@code a}, 3078 * for any lane {@code N} in the vector 3079 * where the mask is set 3080 */ 3081 @ForceInline 3082 public final 3083 void intoArray(int[] a, int offset, 3084 int[] indexMap, int mapOffset) { 3085 IntSpecies vsp = vspecies(); 3086 if (length() == 1) { 3087 intoArray(a, offset + indexMap[mapOffset]); 3088 return; 3089 } 3090 IntVector.IntSpecies isp = (IntVector.IntSpecies) vsp.indexSpecies(); 3091 if (isp.laneCount() != vsp.laneCount()) { 3092 stOp(a, offset, 3093 (arr, off, i, e) -> { 3094 int j = indexMap[mapOffset + i]; 3095 arr[off + j] = e; 3096 }); 3097 return; 3098 } 3099 3100 // Index vector: vix[0:n] = i -> offset + indexMap[mo + i] 3101 IntVector vix = IntVector 3102 .fromArray(isp, indexMap, mapOffset) 3103 .add(offset); 3104 3105 vix = VectorIntrinsics.checkIndex(vix, a.length); 3106 3107 VectorIntrinsics.storeWithMap( 3108 vsp.vectorType(), vsp.elementType(), vsp.laneCount(), 3109 isp.vectorType(), 3110 a, arrayAddress(a, 0), vix, 3111 this, 3112 a, offset, indexMap, mapOffset, 3113 (arr, off, v, map, mo) 3114 -> v.stOp(arr, off, 3115 (arr_, off_, i, e) -> { 3116 int j = map[mo + i]; 3117 arr[off + j] = e; 3118 })); 3119 } 3120 3121 /** 3122 * Stores this vector into an array of type {@code int[]} 3123 * using indexes obtained from an index map 3124 * and using a mask. 3125 * <p> 3126 * For each vector lane, where {@code N} is the vector lane index, 3127 * if the mask lane at index {@code N} is set then 3128 * the lane element at index {@code N} is stored into the array 3129 * element {@code a[f(N)]}, where {@code f(N)} is the 3130 * index mapping expression 3131 * {@code offset + indexMap[mapOffset + N]]}. 
3132 * 3133 * @param a the array 3134 * @param offset an offset to combine with the index map offsets 3135 * @param indexMap the index map 3136 * @param mapOffset the offset into the index map 3137 * @param m the mask 3138 * @returns a vector of the values {@code m ? a[f(N)] : 0}, 3139 * {@code f(N) = offset + indexMap[mapOffset + N]]}. 3140 * @throws IndexOutOfBoundsException 3141 * if {@code mapOffset+N < 0} 3142 * or if {@code mapOffset+N >= indexMap.length}, 3143 * or if {@code f(N)=offset+indexMap[mapOffset+N]} 3144 * is an invalid index into {@code a}, 3145 * for any lane {@code N} in the vector 3146 * where the mask is set 3147 */ 3148 @ForceInline 3149 public final 3150 void intoArray(int[] a, int offset, 3151 int[] indexMap, int mapOffset, 3152 VectorMask<Integer> m) { 3153 IntSpecies vsp = vspecies(); 3154 if (m.allTrue()) { 3155 intoArray(a, offset, indexMap, mapOffset); 3156 return; 3157 } 3158 throw new AssertionError("fixme"); 3159 } 3160 3161 /** 3162 * {@inheritDoc} <!--workaround--> 3163 */ 3164 @Override 3165 @ForceInline 3166 public final 3167 void intoByteArray(byte[] a, int offset) { 3168 offset = checkFromIndexSize(offset, 3169 bitSize() / Byte.SIZE, 3170 a.length); 3171 this.maybeSwap(ByteOrder.LITTLE_ENDIAN) 3172 .intoByteArray0(a, offset); 3173 } 3174 3175 /** 3176 * {@inheritDoc} <!--workaround--> 3177 */ 3178 @Override 3179 @ForceInline 3180 public final 3181 void intoByteArray(byte[] a, int offset, 3182 VectorMask<Integer> m) { 3183 if (m.allTrue()) { 3184 intoByteArray(a, offset); 3185 return; 3186 } 3187 IntSpecies vsp = vspecies(); 3188 checkMaskFromIndexSize(offset, vsp, m, 4, a.length); 3189 conditionalStoreNYI(offset, vsp, m, 4, a.length); 3190 var oldVal = fromByteArray0(a, offset); 3191 var newVal = oldVal.blend(this, m); 3192 newVal.intoByteArray0(a, offset); 3193 } 3194 3195 /** 3196 * {@inheritDoc} <!--workaround--> 3197 */ 3198 @Override 3199 @ForceInline 3200 public final 3201 void intoByteArray(byte[] a, int offset, 3202 
ByteOrder bo, 3203 VectorMask<Integer> m) { 3204 maybeSwap(bo).intoByteArray(a, offset, m); 3205 } 3206 3207 /** 3208 * {@inheritDoc} <!--workaround--> 3209 */ 3210 @Override 3211 @ForceInline 3212 public final 3213 void intoByteBuffer(ByteBuffer bb, int offset, 3214 ByteOrder bo) { 3215 maybeSwap(bo).intoByteBuffer0(bb, offset); 3216 } 3217 3218 /** 3219 * {@inheritDoc} <!--workaround--> 3220 */ 3221 @Override 3222 @ForceInline 3223 public final 3224 void intoByteBuffer(ByteBuffer bb, int offset, 3225 ByteOrder bo, 3226 VectorMask<Integer> m) { 3227 if (m.allTrue()) { 3228 intoByteBuffer(bb, offset, bo); 3229 return; 3230 } 3231 IntSpecies vsp = vspecies(); 3232 checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit()); 3233 conditionalStoreNYI(offset, vsp, m, 4, bb.limit()); 3234 var oldVal = fromByteBuffer0(bb, offset); 3235 var newVal = oldVal.blend(this.maybeSwap(bo), m); 3236 newVal.intoByteBuffer0(bb, offset); 3237 } 3238 3239 // ================================================ 3240 3241 // Low-level memory operations. 3242 // 3243 // Note that all of these operations *must* inline into a context 3244 // where the exact species of the involved vector is a 3245 // compile-time constant. Otherwise, the intrinsic generation 3246 // will fail and performance will suffer. 3247 // 3248 // In many cases this is achieved by re-deriving a version of the 3249 // method in each concrete subclass (per species). The re-derived 3250 // method simply calls one of these generic methods, with exact 3251 // parameters for the controlling metadata, which is either a 3252 // typed vector or constant species instance. 3253 3254 // Unchecked loading operations in native byte order. 3255 // Caller is reponsible for applying index checks, masking, and 3256 // byte swapping. 
    // Unchecked load from an int[]; caller has already bounds-checked.
    /*package-private*/
    abstract
    IntVector fromArray0(int[] a, int offset);
    @ForceInline
    final
    IntVector fromArray0Template(int[] a, int offset) {
        IntSpecies vsp = vspecies();
        return VectorIntrinsics.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }

    // Unchecked load from a byte[], reassembled in native byte order.
    @Override
    abstract
    IntVector fromByteArray0(byte[] a, int offset);
    @ForceInline
    final
    IntVector fromByteArray0Template(byte[] a, int offset) {
        IntSpecies vsp = vspecies();
        return VectorIntrinsics.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> {
                // Fallback path: view the bytes as an IntBuffer and load lanes.
                IntBuffer tb = wrapper(arr, off, NATIVE_ENDIAN);
                return s.ldOp(tb, 0, (tb_, __, i) -> tb_.get(i));
            });
    }

    // Unchecked load from a ByteBuffer, in native byte order.
    abstract
    IntVector fromByteBuffer0(ByteBuffer bb, int offset);
    @ForceInline
    final
    IntVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
        IntSpecies vsp = vspecies();
        return VectorIntrinsics.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            bufferBase(bb), bufferAddress(bb, offset),
            bb, offset, vsp,
            (buf, off, s) -> {
                IntBuffer tb = wrapper(buf, off, NATIVE_ENDIAN);
                return s.ldOp(tb, 0, (tb_, __, i) -> tb_.get(i));
            });
    }

    // Unchecked storing operations in native byte order.
    // Caller is responsible for applying index checks, masking, and
    // byte swapping.
    // Unchecked store into an int[]; caller has already bounds-checked.
    abstract
    void intoArray0(int[] a, int offset);
    @ForceInline
    final
    void intoArray0Template(int[] a, int offset) {
        IntSpecies vsp = vspecies();
        VectorIntrinsics.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }

    // Unchecked store into a byte[], in native byte order.
    abstract
    void intoByteArray0(byte[] a, int offset);
    @ForceInline
    final
    void intoByteArray0Template(byte[] a, int offset) {
        IntSpecies vsp = vspecies();
        VectorIntrinsics.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, a, offset,
            (arr, off, v) -> {
                // Fallback path: view the bytes as an IntBuffer and store lanes.
                IntBuffer tb = wrapper(arr, off, NATIVE_ENDIAN);
                v.stOp(tb, 0, (tb_, __, i, e) -> tb_.put(i, e));
            });
    }

    // Unchecked store into a ByteBuffer, in native byte order.
    @ForceInline
    final
    void intoByteBuffer0(ByteBuffer bb, int offset) {
        IntSpecies vsp = vspecies();
        VectorIntrinsics.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            bufferBase(bb), bufferAddress(bb, offset),
            this, bb, offset,
            (buf, off, v) -> {
                IntBuffer tb = wrapper(buf, off, NATIVE_ENDIAN);
                v.stOp(tb, 0, (tb_, __, i, e) -> tb_.put(i, e));
            });
    }

    // End of low-level memory operations.
    // Bounds-check offset+N*scale against [0, limit) for each set lane.
    private static
    void checkMaskFromIndexSize(int offset,
                                IntSpecies vsp,
                                VectorMask<Integer> m,
                                int scale,
                                int limit) {
        ((AbstractMask<Integer>)m)
            .checkIndexByLane(offset, limit, vsp.iota(), scale);
    }

    // Masked stores are emulated by read-blend-write; that emulation can
    // only touch memory inside [0, limit), so reject anything that would
    // require a true partial store.
    @ForceInline
    private void conditionalStoreNYI(int offset,
                                     IntSpecies vsp,
                                     VectorMask<Integer> m,
                                     int scale,
                                     int limit) {
        if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
            String msg =
                String.format("unimplemented: store @%d in [0..%d), %s in %s",
                              offset, limit, m, vsp);
            throw new AssertionError(msg);
        }
    }

    // Byte-swap each lane iff the requested order differs from native.
    /*package-private*/
    @Override
    @ForceInline
    final
    IntVector maybeSwap(ByteOrder bo) {
        if (bo != NATIVE_ENDIAN) {
            return this.reinterpretAsBytes()
                .rearrange(swapBytesShuffle())
                .reinterpretAsInts();
        }
        return this;
    }

    // log2 of the int[] element stride, for index -> byte-offset shifts.
    static final int ARRAY_SHIFT =
        31 - Integer.numberOfLeadingZeros(Unsafe.ARRAY_INT_INDEX_SCALE);
    static final long ARRAY_BASE =
        Unsafe.ARRAY_INT_BASE_OFFSET;

    // Unsafe byte offset of a[index] within an int[].
    @ForceInline
    static long arrayAddress(int[] a, int index) {
        return ARRAY_BASE + (((long)index) << ARRAY_SHIFT);
    }

    // Unsafe byte offset of a[index] within a byte[].
    @ForceInline
    static long byteArrayAddress(byte[] a, int index) {
        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
    }

    // Byte buffer wrappers.
    // View of a ByteBuffer from offset onward as an IntBuffer in order bo.
    private static IntBuffer wrapper(ByteBuffer bb, int offset,
                                     ByteOrder bo) {
        return bb.duplicate().position(offset).slice()
            .order(bo).asIntBuffer();
    }
    // View of a byte[] from offset onward as an IntBuffer in order bo.
    private static IntBuffer wrapper(byte[] a, int offset,
                                     ByteOrder bo) {
        return ByteBuffer.wrap(a, offset, a.length - offset)
            .order(bo).asIntBuffer();
    }

    // ================================================

    /// Reinterpreting view methods:
    //   lanewise reinterpret: viewAsXVector()
    //   keep shape, redraw lanes: reinterpretAsEs()

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @ForceInline
    @Override
    public final ByteVector reinterpretAsBytes() {
        // Going to ByteVector, pay close attention to byte order.
        assert(REGISTER_ENDIAN == ByteOrder.LITTLE_ENDIAN);
        return asByteVectorRaw();
        //return asByteVectorRaw().rearrange(swapBytesShuffle());
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @ForceInline
    @Override
    public final IntVector viewAsIntegralLanes() {
        // int lanes are already integral: identity view.
        return this;
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @ForceInline
    @Override
    public final
    FloatVector
    viewAsFloatingLanes() {
        LaneType flt = LaneType.INT.asFloating();
        return (FloatVector) asVectorRaw(flt);
    }

    // ================================================

    /// Object methods: toString, equals, hashCode
    //
    // Object methods are defined as if via Arrays.toString, etc.,
    // is applied to the array of elements.  Two equal vectors
    // are required to have equal species and equal lane values.

    /**
     * Returns a string representation of this vector, of the form
     * {@code "[0,1,2...]"}, reporting the lane values of this vector,
     * in lane order.
     *
     * The string is produced as if by a call to {@link
     * java.util.Arrays#toString(int[]) Arrays.toString()},
     * as appropriate to the {@code int} array returned by
     * {@link #toArray this.toArray()}.
     *
     * @return a string of the form {@code "[0,1,2...]"}
     *         reporting the lane values of this vector
     */
    @Override
    @ForceInline
    public final
    String toString() {
        // now that toArray is strongly typed, we can define this
        return Arrays.toString(toArray());
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    boolean equals(Object obj) {
        // Equal species AND all lanes pairwise equal.
        if (obj instanceof Vector) {
            Vector<?> that = (Vector<?>) obj;
            if (this.species().equals(that.species())) {
                return this.eq(that.check(this.species())).allTrue();
            }
        }
        return false;
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    int hashCode() {
        // now that toArray is strongly typed, we can define this
        return Objects.hash(species(), Arrays.hashCode(toArray()));
    }

    // ================================================

    // Species

    /**
     * Class representing {@link IntVector}'s of the same {@link VectorShape VectorShape}.
     */
    /*package-private*/
    static final class IntSpecies extends AbstractSpecies<Integer> {
        private IntSpecies(VectorShape shape,
                           Class<? extends IntVector> vectorType,
                           Class<? extends AbstractMask<Integer>> maskType,
                           Function<Object, IntVector> vectorFactory) {
            super(shape, LaneType.of(int.class),
                  vectorType, maskType,
                  vectorFactory);
            assert(this.elementSize() == Integer.SIZE);
        }

        // Specializing overrides:

        @Override
        @ForceInline
        public final Class<Integer> elementType() {
            return int.class;
        }

        @Override
        @ForceInline
        public final Class<Integer> genericElementType() {
            return Integer.class;
        }

        @Override
        @ForceInline
        public final Class<int[]> arrayType() {
            return int[].class;
        }

        @SuppressWarnings("unchecked")
        @Override
        @ForceInline
        public final Class<? extends IntVector> vectorType() {
            return (Class<? extends IntVector>) vectorType;
        }

        @Override
        @ForceInline
        public final long checkValue(long e) {
            longToElementBits(e);  // only for exception
            return e;
        }

        /*package-private*/
        @Override
        @ForceInline
        final IntVector broadcastBits(long bits) {
            return (IntVector)
                VectorIntrinsics.broadcastCoerced(
                    vectorType, int.class, laneCount,
                    bits, this,
                    (bits_, s_) -> s_.rvOp(i -> bits_));
        }

        /*package-private*/
        @ForceInline
        final IntVector broadcast(int e) {
            return broadcastBits(toBits(e));
        }

        @Override
        @ForceInline
        public final IntVector broadcast(long e) {
            return broadcastBits(longToElementBits(e));
        }

        /*package-private*/
        final @Override
        @ForceInline
        long longToElementBits(long value) {
            // Do the conversion, and then test it for failure.
            int e = (int) value;
            if ((long) e != value) {
                throw badElementBits(value, e);
            }
            return toBits(e);
        }

        @Override
        @ForceInline
        public final IntVector fromValues(long... values) {
            VectorIntrinsics.requireLength(values.length, laneCount);
            int[] va = new int[laneCount()];
            for (int i = 0; i < va.length; i++) {
                long lv = values[i];
                int v = (int) lv;
                va[i] = v;
                // Reject long values that do not fit in an int lane.
                if ((long)v != lv) {
                    throw badElementBits(lv, v);
                }
            }
            return dummyVector().fromArray0(va, 0);
        }

        /* this non-public one is for internal conversions */
        @Override
        @ForceInline
        final IntVector fromIntValues(int[] values) {
            VectorIntrinsics.requireLength(values.length, laneCount);
            int[] va = new int[laneCount()];
            for (int i = 0; i < va.length; i++) {
                int lv = values[i];
                int v = (int) lv;
                va[i] = v;
                // NOTE(review): for int inputs this cast is lossless, so the
                // check below can never fire; it mirrors the long-based
                // template above (mechanically generated code).
                if ((int)v != lv) {
                    throw badElementBits(lv, v);
                }
            }
            return dummyVector().fromArray0(va, 0);
        }

        // Virtual constructors

        @ForceInline
        @Override final
        public IntVector fromArray(Object a, int offset) {
            // User entry point:  Be careful with inputs.
            return IntVector
                .fromArray(this, (int[]) a, offset);
        }

        @Override final
        IntVector dummyVector() {
            return (IntVector) super.dummyVector();
        }

        final
        IntVector vectorFactory(int[] vec) {
            // Species delegates all factory requests to its dummy
            // vector.  The dummy knows all about it.
            return dummyVector().vectorFactory(vec);
        }

        /*package-private*/
        final @Override
        @ForceInline
        IntVector rvOp(RVOp f) {
            int[] res = new int[laneCount()];
            for (int i = 0; i < res.length; i++) {
                int bits = (int) f.apply(i);
                res[i] = fromBits(bits);
            }
            return dummyVector().vectorFactory(res);
        }

        IntVector vOp(FVOp f) {
            int[] res = new int[laneCount()];
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i);
            }
            return dummyVector().vectorFactory(res);
        }

        IntVector vOp(VectorMask<Integer> m, FVOp f) {
            int[] res = new int[laneCount()];
            boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
            for (int i = 0; i < res.length; i++) {
                // Unset lanes keep the default value (zero).
                if (mbits[i]) {
                    res[i] = f.apply(i);
                }
            }
            return dummyVector().vectorFactory(res);
        }

        /*package-private*/
        @ForceInline
        <M> IntVector ldOp(M memory, int offset,
                           FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> IntVector ldOp(M memory, int offset,
                           AbstractMask<Integer> m,
                           FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, m, f);
        }

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset, FStOp<M> f) {
            dummyVector().stOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset,
                      AbstractMask<Integer> m,
                      FStOp<M> f) {
            dummyVector().stOp(memory, offset, m, f);
        }

        // N.B. Make sure these constant vectors and
        // masks load up correctly into registers.
        //
        // Also, see if we can avoid all that switching.
        // Could we cache both vectors and both masks in
        // this species object?
        // Zero and iota vector access
        @Override
        @ForceInline
        public final IntVector zero() {
            // Dispatch on concrete shape to a cached per-shape constant.
            if ((Class<?>) vectorType() == IntMaxVector.class)
                return IntMaxVector.ZERO;
            switch (vectorBitSize()) {
                case 64: return Int64Vector.ZERO;
                case 128: return Int128Vector.ZERO;
                case 256: return Int256Vector.ZERO;
                case 512: return Int512Vector.ZERO;
            }
            throw new AssertionError();
        }

        @Override
        @ForceInline
        public final IntVector iota() {
            if ((Class<?>) vectorType() == IntMaxVector.class)
                return IntMaxVector.IOTA;
            switch (vectorBitSize()) {
                case 64: return Int64Vector.IOTA;
                case 128: return Int128Vector.IOTA;
                case 256: return Int256Vector.IOTA;
                case 512: return Int512Vector.IOTA;
            }
            throw new AssertionError();
        }

        // Mask access
        @Override
        @ForceInline
        public final VectorMask<Integer> maskAll(boolean bit) {
            if ((Class<?>) vectorType() == IntMaxVector.class)
                return IntMaxVector.IntMaxMask.maskAll(bit);
            switch (vectorBitSize()) {
                case 64: return Int64Vector.Int64Mask.maskAll(bit);
                case 128: return Int128Vector.Int128Mask.maskAll(bit);
                case 256: return Int256Vector.Int256Mask.maskAll(bit);
                case 512: return Int512Vector.Int512Mask.maskAll(bit);
            }
            throw new AssertionError();
        }
    }

    /**
     * Finds a species for an element type of {@code int} and shape.
     *
     * @param s the shape
     * @return a species for an element type of {@code int} and shape
     * @throws IllegalArgumentException if no such species exists for the shape
     */
    static IntSpecies species(VectorShape s) {
        Objects.requireNonNull(s);
        switch (s) {
            case S_64_BIT: return (IntSpecies) SPECIES_64;
            case S_128_BIT: return (IntSpecies) SPECIES_128;
            case S_256_BIT: return (IntSpecies) SPECIES_256;
            case S_512_BIT: return (IntSpecies) SPECIES_512;
            case S_Max_BIT: return (IntSpecies) SPECIES_MAX;
            default: throw new IllegalArgumentException("Bad shape: " + s);
        }
    }

    /** Species representing {@link IntVector}s of {@link VectorShape#S_64_BIT VectorShape.S_64_BIT}. */
    public static final VectorSpecies<Integer> SPECIES_64
        = new IntSpecies(VectorShape.S_64_BIT,
                         Int64Vector.class,
                         Int64Vector.Int64Mask.class,
                         Int64Vector::new);

    /** Species representing {@link IntVector}s of {@link VectorShape#S_128_BIT VectorShape.S_128_BIT}. */
    public static final VectorSpecies<Integer> SPECIES_128
        = new IntSpecies(VectorShape.S_128_BIT,
                         Int128Vector.class,
                         Int128Vector.Int128Mask.class,
                         Int128Vector::new);

    /** Species representing {@link IntVector}s of {@link VectorShape#S_256_BIT VectorShape.S_256_BIT}. */
    public static final VectorSpecies<Integer> SPECIES_256
        = new IntSpecies(VectorShape.S_256_BIT,
                         Int256Vector.class,
                         Int256Vector.Int256Mask.class,
                         Int256Vector::new);

    /** Species representing {@link IntVector}s of {@link VectorShape#S_512_BIT VectorShape.S_512_BIT}. */
    public static final VectorSpecies<Integer> SPECIES_512
        = new IntSpecies(VectorShape.S_512_BIT,
                         Int512Vector.class,
                         Int512Vector.Int512Mask.class,
                         Int512Vector::new);

    /** Species representing {@link IntVector}s of {@link VectorShape#S_Max_BIT VectorShape.S_Max_BIT}.
     */
    public static final VectorSpecies<Integer> SPECIES_MAX
        = new IntSpecies(VectorShape.S_Max_BIT,
                         IntMaxVector.class,
                         IntMaxVector.IntMaxMask.class,
                         IntMaxVector::new);

    /**
     * Preferred species for {@link IntVector}s.
     * A preferred species is a species of maximal bit-size for the platform.
     */
    public static final VectorSpecies<Integer> SPECIES_PREFERRED
        = (IntSpecies) VectorSpecies.ofPreferred(int.class);


    // ==== JROSE NAME CHANGES ====

    // Deprecated aliases kept for source compatibility during the API
    // rename; each simply forwards to the lanewise/reduceLanes form.
    // NOTE(review): the floating-point-only aliases below (sqrt, tan,
    // sin, log, pow, ...) forward ops that opCode() rejects for int
    // vectors (FORBID_OPCODE_KIND = VO_ONLYFP), so they will fail at
    // run time — confirm before relying on them.

    /** Use lanewise(NEG, m). */
    @Deprecated
    public final IntVector neg(VectorMask<Integer> m) {
        return lanewise(NEG, m);
    }

    /** Use lanewise(ABS, m). */
    @Deprecated
    public final IntVector abs(VectorMask<Integer> m) {
        return lanewise(ABS, m);
    }

    /** Use explicit argument of ByteOrder.LITTLE_ENDIAN */
    @Deprecated
    public static
    IntVector fromByteBuffer(VectorSpecies<Integer> species,
                             ByteBuffer bb, int offset) {
        ByteOrder bo = ByteOrder.LITTLE_ENDIAN;
        if (bb.order() != bo) throw new IllegalArgumentException();
        return fromByteBuffer(species, bb, offset, bo);
    }

    /** Use explicit argument of ByteOrder.LITTLE_ENDIAN */
    @Deprecated
    public static
    IntVector fromByteBuffer(VectorSpecies<Integer> species,
                             ByteBuffer bb, int offset,
                             VectorMask<Integer> m) {
        ByteOrder bo = ByteOrder.LITTLE_ENDIAN;
        if (bb.order() != bo) throw new IllegalArgumentException();
        return fromByteBuffer(species, bb, offset, bo, m);
    }

    /** Use fromValues(s, value...) */
    @Deprecated
    public static
    IntVector scalars(VectorSpecies<Integer> species,
                      int... values) {
        return fromValues(species, values);
    }

    @Deprecated public final int addLanes() { return reduceLanes(ADD); }
    @Deprecated public final int addLanes(VectorMask<Integer> m) { return reduceLanes(ADD, m); }
    @Deprecated public final int mulLanes() { return reduceLanes(MUL); }
    @Deprecated public final int mulLanes(VectorMask<Integer> m) { return reduceLanes(MUL, m); }
    @Deprecated public final int minLanes() { return reduceLanes(MIN); }
    @Deprecated public final int minLanes(VectorMask<Integer> m) { return reduceLanes(MIN, m); }
    @Deprecated public final int maxLanes() { return reduceLanes(MAX); }
    @Deprecated public final int maxLanes(VectorMask<Integer> m) { return reduceLanes(MAX, m); }
    @Deprecated public final int orLanes() { return reduceLanes(OR); }
    @Deprecated public final int orLanes(VectorMask<Integer> m) { return reduceLanes(OR, m); }
    @Deprecated public final int andLanes() { return reduceLanes(AND); }
    @Deprecated public final int andLanes(VectorMask<Integer> m) { return reduceLanes(AND, m); }
    @Deprecated public final int xorLanes() { return reduceLanes(XOR); }
    @Deprecated public final int xorLanes(VectorMask<Integer> m) { return reduceLanes(XOR, m); }
    @Deprecated public final IntVector sqrt(VectorMask<Integer> m) { return lanewise(SQRT, m); }
    @Deprecated public final IntVector tan() { return lanewise(TAN); }
    @Deprecated public final IntVector tan(VectorMask<Integer> m) { return lanewise(TAN, m); }
    @Deprecated public final IntVector tanh() { return lanewise(TANH); }
    @Deprecated public final IntVector tanh(VectorMask<Integer> m) { return lanewise(TANH, m); }
    @Deprecated public final IntVector sin() { return lanewise(SIN); }
    @Deprecated public final IntVector sin(VectorMask<Integer> m) { return lanewise(SIN, m); }
    @Deprecated public final IntVector sinh() { return lanewise(SINH); }
    @Deprecated public final IntVector sinh(VectorMask<Integer> m) { return lanewise(SINH, m); }
    @Deprecated public final IntVector cos() { return lanewise(COS); }
    @Deprecated public final IntVector cos(VectorMask<Integer> m) { return lanewise(COS, m); }
    @Deprecated public final IntVector cosh() { return lanewise(COSH); }
    @Deprecated public final IntVector cosh(VectorMask<Integer> m) { return lanewise(COSH, m); }
    @Deprecated public final IntVector asin() { return lanewise(ASIN); }
    @Deprecated public final IntVector asin(VectorMask<Integer> m) { return lanewise(ASIN, m); }
    @Deprecated public final IntVector acos() { return lanewise(ACOS); }
    @Deprecated public final IntVector acos(VectorMask<Integer> m) { return lanewise(ACOS, m); }
    @Deprecated public final IntVector atan() { return lanewise(ATAN); }
    @Deprecated public final IntVector atan(VectorMask<Integer> m) { return lanewise(ATAN, m); }
    @Deprecated public final IntVector atan2(Vector<Integer> v) { return lanewise(ATAN2, v); }
    @Deprecated public final IntVector atan2(int s) { return lanewise(ATAN2, s); }
    @Deprecated public final IntVector atan2(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(ATAN2, v, m); }
    @Deprecated public final IntVector atan2(int s, VectorMask<Integer> m) { return lanewise(ATAN2, s, m); }
    @Deprecated public final IntVector cbrt() { return lanewise(CBRT); }
    @Deprecated public final IntVector cbrt(VectorMask<Integer> m) { return lanewise(CBRT, m); }
    @Deprecated public final IntVector log() { return lanewise(LOG); }
    @Deprecated public final IntVector log(VectorMask<Integer> m) { return lanewise(LOG, m); }
    @Deprecated public final IntVector log10() { return lanewise(LOG10); }
    @Deprecated public final IntVector log10(VectorMask<Integer> m) { return lanewise(LOG10, m); }
    @Deprecated public final IntVector log1p() { return lanewise(LOG1P); }
    @Deprecated public final IntVector log1p(VectorMask<Integer> m) { return lanewise(LOG1P, m); }
    @Deprecated public final IntVector pow(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(POW, v, m); }
    @Deprecated public final IntVector pow(int s, VectorMask<Integer> m) { return lanewise(POW, s, m); }
    @Deprecated public final IntVector exp() { return lanewise(EXP); }
    @Deprecated public final IntVector exp(VectorMask<Integer> m) { return lanewise(EXP, m); }
    @Deprecated public final IntVector expm1() { return lanewise(EXPM1); }
    @Deprecated public final IntVector expm1(VectorMask<Integer> m) { return lanewise(EXPM1, m); }
    @Deprecated public final IntVector hypot(Vector<Integer> v) { return lanewise(HYPOT, v); }
    @Deprecated public final IntVector hypot(int s) { return lanewise(HYPOT, s); }
    @Deprecated public final IntVector hypot(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(HYPOT, v, m); }
    @Deprecated public final IntVector hypot(int s, VectorMask<Integer> m) { return lanewise(HYPOT, s, m); }
    @Deprecated public final IntVector and(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(AND, v, m); }
    @Deprecated public final IntVector and(int s, VectorMask<Integer> m) { return lanewise(AND, s, m); }
    @Deprecated public final IntVector or(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(OR, v, m); }
    @Deprecated public final IntVector or(int s, VectorMask<Integer> m) { return lanewise(OR, s, m); }
    @Deprecated public final IntVector xor(Vector<Integer> v) { return lanewise(XOR, v); }
    @Deprecated public final IntVector xor(int s) { return lanewise(XOR, s); }
    @Deprecated public final IntVector xor(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(XOR, v, m); }
    @Deprecated public final IntVector xor(int s, VectorMask<Integer> m) { return lanewise(XOR, s, m); }
    @Deprecated public final IntVector not(VectorMask<Integer> m) { return lanewise(NOT, m); }
    @Deprecated public final IntVector shiftLeft(int s) { return lanewise(LSHL, (int) s); }
    @Deprecated public final IntVector shiftLeft(int s, VectorMask<Integer> m) { return lanewise(LSHL, (int) s, m); }
    @Deprecated public final IntVector shiftLeft(Vector<Integer> v) { return lanewise(LSHL, v); }
    @Deprecated public final IntVector shiftLeft(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(LSHL, v, m); }
    @Deprecated public final IntVector shiftRight(int s) { return lanewise(LSHR, (int) s); }
    @Deprecated public final IntVector shiftRight(int s, VectorMask<Integer> m) { return lanewise(LSHR, (int) s, m); }
    @Deprecated public final IntVector shiftRight(Vector<Integer> v) { return lanewise(LSHR, v); }
    @Deprecated public final IntVector shiftRight(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(LSHR, v, m); }
    @Deprecated public final IntVector shiftArithmeticRight(int s) { return lanewise(ASHR, (int) s); }
    @Deprecated public final IntVector shiftArithmeticRight(int s, VectorMask<Integer> m) { return lanewise(ASHR, (int) s, m); }
    @Deprecated public final IntVector shiftArithmeticRight(Vector<Integer> v) { return lanewise(ASHR, v); }
    @Deprecated public final IntVector shiftArithmeticRight(Vector<Integer> v, VectorMask<Integer> m) { return lanewise(ASHR, v, m); }
    @Deprecated public final IntVector rotateLeft(int s) { return lanewise(ROL, (int) s); }
    @Deprecated public final IntVector rotateLeft(int s, VectorMask<Integer> m) { return lanewise(ROL, (int) s, m); }
    @Deprecated public final IntVector rotateRight(int s) { return lanewise(ROR, (int) s); }
    @Deprecated public final IntVector rotateRight(int s, VectorMask<Integer> m) { return lanewise(ROR, (int) s, m); }
    @Deprecated @Override public IntVector rotateLanesLeft(int i) { return (IntVector) super.rotateLanesLeft(i); }
    @Deprecated @Override public IntVector rotateLanesRight(int i) { return (IntVector) super.rotateLanesRight(i); }
    @Deprecated @Override public IntVector shiftLanesLeft(int i) { return (IntVector) super.shiftLanesLeft(i); }
    @Deprecated @Override public IntVector shiftLanesRight(int i) { return (IntVector) super.shiftLanesRight(i); }
    @Deprecated public IntVector with(int i, int e) { return withLane(i, e); }
}