1 /* 2 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have 23 * questions. 24 */ 25 package jdk.incubator.vector; 26 27 import java.nio.ByteBuffer; 28 import java.nio.ByteOrder; 29 import java.nio.ReadOnlyBufferException; 30 import java.util.Arrays; 31 import java.util.Objects; 32 import java.util.function.IntUnaryOperator; 33 34 import jdk.internal.misc.Unsafe; 35 import jdk.internal.vm.annotation.ForceInline; 36 import jdk.internal.vm.annotation.Stable; 37 38 import static jdk.incubator.vector.VectorIntrinsics.*; 39 import static jdk.incubator.vector.VectorOperators.*; 40 41 // -- This file was mechanically generated: Do not edit! 
// Concrete specialization of ByteVector for the 64-bit vector shape
// (8 byte lanes).  Mechanically generated from the ByteVector template;
// behavioral changes belong in the generator template, not this file.
@SuppressWarnings("cast")  // warning: redundant cast
final class Byte64Vector extends ByteVector {
    static final ByteSpecies VSPECIES =
        (ByteSpecies) ByteVector.SPECIES_64;

    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    static final Class<Byte64Vector> VCLASS = Byte64Vector.class;

    static final int VSIZE = VSPECIES.vectorBitSize();

    static final int VLENGTH = VSPECIES.laneCount();

    static final Class<Byte> ETYPE = byte.class;

    // The JVM expects to find the state here.
    private final byte[] vec; // Don't access directly, use vec() instead.

    Byte64Vector(byte[] v) {
        vec = v;
    }

    // For compatibility as Byte64Vector::new,
    // stored into species.vectorFactory.
    Byte64Vector(Object v) {
        this((byte[]) v);
    }

    static final Byte64Vector ZERO = new Byte64Vector(new byte[VLENGTH]);
    static final Byte64Vector IOTA = new Byte64Vector(VSPECIES.iotaArray());

    static {
        // Warm up a few species caches.
        // If we do this too much we will
        // get NPEs from bootstrap circularity.
        VSPECIES.dummyVector();
        VSPECIES.withLanes(LaneType.BYTE);
    }

    // Specialized extractors

    @ForceInline
    final @Override
    public ByteSpecies vspecies() {
        // ISSUE: This should probably be a @Stable
        // field inside AbstractVector, rather than
        // a megamorphic method.
        return VSPECIES;
    }

    @ForceInline
    @Override
    public final Class<Byte> elementType() { return byte.class; }

    @ForceInline
    @Override
    public final int elementSize() { return Byte.SIZE; }

    @ForceInline
    @Override
    public final VectorShape shape() { return VSHAPE; }

    @ForceInline
    @Override
    public final int length() { return VLENGTH; }

    @ForceInline
    @Override
    public final int bitSize() { return VSIZE; }

    @ForceInline
    @Override
    public final int byteSize() { return VSIZE / Byte.SIZE; }

    /*package-private*/
    @ForceInline
    final @Override
    byte[] vec() {
        // maybeRebox gives the JIT a chance to materialize the vector
        // payload before the backing array is read.
        return VectorIntrinsics.maybeRebox(this).vec;
    }

    // Virtualized constructors

    @Override
    @ForceInline
    public final Byte64Vector broadcast(byte e) {
        return (Byte64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    public final Byte64Vector broadcast(long e) {
        return (Byte64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    Byte64Mask maskFromArray(boolean[] bits) {
        return new Byte64Mask(bits);
    }

    @Override
    @ForceInline
    Byte64Shuffle iotaShuffle() { return Byte64Shuffle.IOTA; }

    // Shifted iota: lane i maps to wrapToRange(i + start, VLENGTH).
    @ForceInline
    Byte64Shuffle iotaShuffle(int start) {
        return (Byte64Shuffle)VectorIntrinsics.shuffleIota(ETYPE, Byte64Shuffle.class, VSPECIES, VLENGTH, start, (val, l) -> new Byte64Shuffle(i -> (VectorIntrinsics.wrapToRange(i + val, l))));
    }

    @Override
    @ForceInline
    Byte64Shuffle shuffleFromBytes(byte[] reorder) { return new Byte64Shuffle(reorder); }

    @Override
    @ForceInline
    Byte64Shuffle shuffleFromArray(int[] indexes, int i) { return new Byte64Shuffle(indexes, i); }

    @Override
    @ForceInline
    Byte64Shuffle shuffleFromOp(IntUnaryOperator fn) { return new Byte64Shuffle(fn); }

    // Make a vector of the same species but the given elements:
    @ForceInline
    final @Override
    Byte64Vector vectorFactory(byte[] vec) {
        return new Byte64Vector(vec);
    }

    @ForceInline
    final @Override
    Byte64Vector asByteVectorRaw() {
        return (Byte64Vector) super.asByteVectorRawTemplate();  // specialize
    }

    @ForceInline
    final @Override
    AbstractVector<?> asVectorRaw(LaneType laneType) {
        return super.asVectorRawTemplate(laneType);  // specialize
    }

    // Unary operator

    final @Override
    Byte64Vector uOp(FUnOp f) {
        return (Byte64Vector) super.uOpTemplate(f);  // specialize
    }

    @ForceInline
    final @Override
    Byte64Vector uOp(VectorMask<Byte> m, FUnOp f) {
        return (Byte64Vector)
            super.uOpTemplate((Byte64Mask)m, f);  // specialize
    }

    // Binary operator

    @ForceInline
    final @Override
    Byte64Vector bOp(Vector<Byte> v, FBinOp f) {
        return (Byte64Vector) super.bOpTemplate((Byte64Vector)v, f);  // specialize
    }

    @ForceInline
    final @Override
    Byte64Vector bOp(Vector<Byte> v,
                     VectorMask<Byte> m, FBinOp f) {
        return (Byte64Vector)
            super.bOpTemplate((Byte64Vector)v, (Byte64Mask)m,
                              f);  // specialize
    }

    // Ternary operator

    @ForceInline
    final @Override
    Byte64Vector tOp(Vector<Byte> v1, Vector<Byte> v2, FTriOp f) {
        return (Byte64Vector)
            super.tOpTemplate((Byte64Vector)v1, (Byte64Vector)v2,
                              f);  // specialize
    }

    @ForceInline
    final @Override
    Byte64Vector tOp(Vector<Byte> v1, Vector<Byte> v2,
                     VectorMask<Byte> m, FTriOp f) {
        return (Byte64Vector)
            super.tOpTemplate((Byte64Vector)v1, (Byte64Vector)v2,
                              (Byte64Mask)m, f);  // specialize
    }

    // Reduction operator (fold over lanes with accumulator v)

    @ForceInline
    final @Override
    byte rOp(byte v, FBinOp f) {
        return super.rOpTemplate(v, f);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> convertShape(VectorOperators.Conversion<Byte,F> conv,
                           VectorSpecies<F> rsp, int part) {
        return super.convertShapeTemplate(conv, rsp, part);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> reinterpretShape(VectorSpecies<F> toSpecies, int part) {
        return super.reinterpretShapeTemplate(toSpecies, part);  // specialize
    }

    // Specialized algebraic operations:

    // The following definition forces a specialized version of this
    // crucial method into the v-table of this class.  A call to add()
    // will inline to a call to lanewise(ADD,), at which point the JIT
    // intrinsic will have the opcode of ADD, plus all the metadata
    // for this particular class, enabling it to generate precise
    // code.
    //
    // There is probably no benefit to the JIT to specialize the
    // masked or broadcast versions of the lanewise method.

    @Override
    @ForceInline
    public Byte64Vector lanewise(Unary op) {
        return (Byte64Vector) super.lanewiseTemplate(op);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector lanewise(Binary op, Vector<Byte> v) {
        return (Byte64Vector) super.lanewiseTemplate(op, v);  // specialize
    }

    /*package-private*/
    @Override
    @ForceInline Byte64Vector
    lanewiseShift(VectorOperators.Binary op, int e) {
        return (Byte64Vector) super.lanewiseShiftTemplate(op, e);  // specialize
    }

    /*package-private*/
    @Override
    @ForceInline
    public final
    Byte64Vector
    lanewise(VectorOperators.Ternary op, Vector<Byte> v1, Vector<Byte> v2) {
        return (Byte64Vector) super.lanewiseTemplate(op, v1, v2);  // specialize
    }

    @Override
    @ForceInline
    public final
    Byte64Vector addIndex(int scale) {
        return (Byte64Vector) super.addIndexTemplate(scale);  // specialize
    }

    // Type specific horizontal reductions

    @Override
    @ForceInline
    public final byte reduceLanes(VectorOperators.Associative op) {
        return super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final byte reduceLanes(VectorOperators.Associative op,
                                  VectorMask<Byte> m) {
        return super.reduceLanesTemplate(op, m);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op) {
        return (long) super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op,
                                        VectorMask<Byte> m) {
        return (long) super.reduceLanesTemplate(op, m);  // specialized
    }

    // Reinterpret this vector's lane values as shuffle indexes,
    // widening each byte lane to an int index.
    @Override
    @ForceInline
    public VectorShuffle<Byte> toShuffle() {
        byte[] a = toArray();
        int[] sa = new int[a.length];
        for (int i = 0; i < a.length; i++) {
            sa[i] = (int) a[i];
        }
        return VectorShuffle.fromArray(VSPECIES, sa, 0);
    }

    // Specialized unary testing

    @Override
    @ForceInline
    public final Byte64Mask test(Test op) {
        return super.testTemplate(Byte64Mask.class, op);  // specialize
    }

    // Specialized comparisons

    @Override
    @ForceInline
    public final Byte64Mask compare(Comparison op, Vector<Byte> v) {
        return super.compareTemplate(Byte64Mask.class, op, v);  // specialize
    }

    @Override
    @ForceInline
    public final Byte64Mask compare(Comparison op, byte s) {
        return super.compareTemplate(Byte64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public final Byte64Mask compare(Comparison op, long s) {
        return super.compareTemplate(Byte64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector blend(Vector<Byte> v, VectorMask<Byte> m) {
        return (Byte64Vector)
            super.blendTemplate(Byte64Mask.class,
                                (Byte64Vector) v,
                                (Byte64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector slice(int origin, Vector<Byte> v) {
        return (Byte64Vector) super.sliceTemplate(origin, v);  // specialize
    }

    // Single-argument slice: rotate lanes left by origin via an iota
    // shuffle, then blend with ZERO so the wrapped-around tail lanes
    // (index >= VLENGTH-origin) come out as zero.
    @Override
    @ForceInline
    public Byte64Vector slice(int origin) {
        if ((origin < 0) || (origin >= VLENGTH)) {
            throw new ArrayIndexOutOfBoundsException("Index " + origin + " out of bounds for vector length " + VLENGTH);
        } else {
            Byte64Shuffle Iota = (Byte64Shuffle)VectorShuffle.iota(VSPECIES, 0, 1, true);
            VectorMask<Byte> BlendMask = Iota.toVector().compare(VectorOperators.LT, (broadcast((byte)(VLENGTH-origin))));
            Iota = (Byte64Shuffle)VectorShuffle.iota(VSPECIES, origin, 1, true);
            return ZERO.blend(this.rearrange(Iota), BlendMask);
        }
    }

    @Override
    @ForceInline
    public Byte64Vector unslice(int origin, Vector<Byte> w, int part) {
        return (Byte64Vector) super.unsliceTemplate(origin, w, part);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector unslice(int origin, Vector<Byte> w, int part, VectorMask<Byte> m) {
        return (Byte64Vector)
            super.unsliceTemplate(Byte64Mask.class,
                                  origin, w, part,
                                  (Byte64Mask) m);  // specialize
    }

    // Inverse of slice(origin): rotate lanes right by origin (iota with
    // -origin start), blending with ZERO so the leading lanes
    // (index < origin) come out as zero.
    @Override
    @ForceInline
    public Byte64Vector unslice(int origin) {
        if ((origin < 0) || (origin >= VLENGTH)) {
            throw new ArrayIndexOutOfBoundsException("Index " + origin + " out of bounds for vector length " + VLENGTH);
        } else {
            Byte64Shuffle Iota = (Byte64Shuffle)VectorShuffle.iota(VSPECIES, 0, 1, true);
            VectorMask<Byte> BlendMask = Iota.toVector().compare(VectorOperators.GE, (broadcast((byte)(origin))));
            Iota = (Byte64Shuffle)VectorShuffle.iota(VSPECIES, -origin, 1, true);
            return ZERO.blend(this.rearrange(Iota), BlendMask);
        }
    }

    @Override
    @ForceInline
    public Byte64Vector rearrange(VectorShuffle<Byte> s) {
        return (Byte64Vector)
            super.rearrangeTemplate(Byte64Shuffle.class,
                                    (Byte64Shuffle) s);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector rearrange(VectorShuffle<Byte> shuffle,
                                  VectorMask<Byte> m) {
        return (Byte64Vector)
            super.rearrangeTemplate(Byte64Shuffle.class,
                                    (Byte64Shuffle) shuffle,
                                    (Byte64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector rearrange(VectorShuffle<Byte> s,
                                  Vector<Byte> v) {
        return (Byte64Vector)
            super.rearrangeTemplate(Byte64Shuffle.class,
                                    (Byte64Shuffle) s,
                                    (Byte64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v,
                                   VectorMask<Byte> m) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v,
                                     (Byte64Mask) m);  // specialize
    }


    // Extract lane i through the JIT intrinsic; the lambda is the
    // fallback used when the intrinsic does not apply.
    @Override
    public byte lane(int i) {
        if (i < 0 || i >= VLENGTH) {
            throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
        return (byte) VectorIntrinsics.extract(
                                VCLASS, ETYPE, VLENGTH,
                                this, i,
                                (vec, ix) -> {
                                    byte[] vecarr = vec.vec();
                                    return (long)vecarr[ix];
                                });
    }

    // Return a new vector with lane i replaced by e; the original
    // vector is unchanged (fallback clones the backing array).
    @Override
    public Byte64Vector withLane(int i, byte e) {
        if (i < 0 || i >= VLENGTH) {
            throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
        return VectorIntrinsics.insert(
                                VCLASS, ETYPE, VLENGTH,
                                this, i, (long)e,
                                (v, ix, bits) -> {
                                    byte[] res = v.vec().clone();
                                    res[ix] = (byte)bits;
                                    return v.vectorFactory(res);
                                });
    }

    // Mask

    static final class Byte64Mask extends AbstractMask<Byte> {

        private final boolean[] bits; // Don't access directly, use getBits() instead.

        public Byte64Mask(boolean[] bits) {
            this(bits, 0);
        }

        // Copy VLENGTH flags starting at offset; the mask owns a
        // private copy, so later writes to the source array have no effect.
        public Byte64Mask(boolean[] bits, int offset) {
            boolean[] a = new boolean[vspecies().laneCount()];
            for (int i = 0; i < a.length; i++) {
                a[i] = bits[offset + i];
            }
            this.bits = a;
        }

        // All-lanes-equal mask (used by TRUE_MASK / FALSE_MASK below).
        public Byte64Mask(boolean val) {
            boolean[] bits = new boolean[vspecies().laneCount()];
            Arrays.fill(bits, val);
            this.bits = bits;
        }

        @ForceInline
        final @Override
        public ByteSpecies vspecies() {
            // ISSUE: This should probably be a @Stable
            // field inside AbstractMask, rather than
            // a megamorphic method.
            return VSPECIES;
        }

        boolean[] getBits() {
            return VectorIntrinsics.maybeRebox(this).bits;
        }

        @Override
        Byte64Mask uOp(MUnOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i]);
            }
            return new Byte64Mask(res);
        }

        @Override
        Byte64Mask bOp(VectorMask<Byte> m, MBinOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            boolean[] mbits = ((Byte64Mask)m).getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i], mbits[i]);
            }
            return new Byte64Mask(res);
        }

        @ForceInline
        @Override
        public final
        Byte64Vector toVector() {
            return (Byte64Vector) super.toVectorTemplate();  // specialize
        }

        // Cast to a same-length mask of another lane type (same 64-bit shape).
        @Override
        @ForceInline
        public <E> VectorMask<E> cast(VectorSpecies<E> s) {
            AbstractSpecies<E> species = (AbstractSpecies<E>) s;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorMask length and species length differ");
            boolean[] maskArray = toArray();
            // enum-switches don't optimize properly JDK-8161245
            switch (species.laneType.switchKey) {
            case LaneType.SK_BYTE:
                return new Byte64Vector.Byte64Mask(maskArray).check(species);
            case LaneType.SK_SHORT:
                return new Short64Vector.Short64Mask(maskArray).check(species);
            case LaneType.SK_INT:
                return new Int64Vector.Int64Mask(maskArray).check(species);
            case LaneType.SK_LONG:
                return new Long64Vector.Long64Mask(maskArray).check(species);
            case LaneType.SK_FLOAT:
                return new Float64Vector.Float64Mask(maskArray).check(species);
            case LaneType.SK_DOUBLE:
                return new Double64Vector.Double64Mask(maskArray).check(species);
            }

            // Should not reach here.
            throw new AssertionError(species);
        }

        // Unary operations

        @Override
        @ForceInline
        public Byte64Mask not() {
            return (Byte64Mask) VectorIntrinsics.unaryOp(
                                             VECTOR_OP_NOT, Byte64Mask.class, byte.class, VLENGTH,
                                             this,
                                             (m1) -> m1.uOp((i, a) -> !a));
        }

        // Binary operations

        @Override
        @ForceInline
        public Byte64Mask and(VectorMask<Byte> mask) {
            Objects.requireNonNull(mask);
            Byte64Mask m = (Byte64Mask)mask;
            return VectorIntrinsics.binaryOp(VECTOR_OP_AND, Byte64Mask.class, byte.class, VLENGTH,
                                             this, m,
                                             (m1, m2) -> m1.bOp(m2, (i, a, b) -> a & b));
        }

        @Override
        @ForceInline
        public Byte64Mask or(VectorMask<Byte> mask) {
            Objects.requireNonNull(mask);
            Byte64Mask m = (Byte64Mask)mask;
            return VectorIntrinsics.binaryOp(VECTOR_OP_OR, Byte64Mask.class, byte.class, VLENGTH,
                                             this, m,
                                             (m1, m2) -> m1.bOp(m2, (i, a, b) -> a | b));
        }

        // Reductions

        @Override
        @ForceInline
        public boolean anyTrue() {
            return VectorIntrinsics.test(BT_ne, Byte64Mask.class, byte.class, VLENGTH,
                                         this, this,
                                         (m, __) -> anyTrueHelper(((Byte64Mask)m).getBits()));
        }

        @Override
        @ForceInline
        public boolean allTrue() {
            return VectorIntrinsics.test(BT_overflow, Byte64Mask.class, byte.class, VLENGTH,
                                         this, vspecies().maskAll(true),
                                         (m, __) -> allTrueHelper(((Byte64Mask)m).getBits()));
        }

        /*package-private*/
        static Byte64Mask maskAll(boolean bit) {
            return bit ? TRUE_MASK : FALSE_MASK;
        }
        // Canonical all-true / all-false instances shared via maskAll.
        static final Byte64Mask TRUE_MASK = new Byte64Mask(true);
        static final Byte64Mask FALSE_MASK = new Byte64Mask(false);
    }

    // Shuffle

    static final class Byte64Shuffle extends AbstractShuffle<Byte> {
        Byte64Shuffle(byte[] reorder) {
            super(reorder);
        }

        public Byte64Shuffle(int[] reorder) {
            super(reorder);
        }

        public Byte64Shuffle(int[] reorder, int i) {
            super(reorder, i);
        }

        public Byte64Shuffle(IntUnaryOperator fn) {
            super(fn);
        }

        @Override
        public ByteSpecies vspecies() {
            return VSPECIES;
        }

        static {
            // There must be enough bits in the shuffle lanes to encode
            // VLENGTH valid indexes and VLENGTH exceptional ones.
            assert(VLENGTH < Byte.MAX_VALUE);
            assert(Byte.MIN_VALUE <= -VLENGTH);
        }
        static final Byte64Shuffle IOTA = new Byte64Shuffle(IDENTITY);

        @Override
        @ForceInline
        public Byte64Vector toVector() {
            return VectorIntrinsics.shuffleToVector(VCLASS, ETYPE, Byte64Shuffle.class, this, VLENGTH,
                                                    (s) -> ((Byte64Vector)(((AbstractShuffle<Byte>)(s)).toVectorTemplate())));
        }

        // Cast to a same-length shuffle of another lane type (same 64-bit shape).
        @Override
        @ForceInline
        public <F> VectorShuffle<F> cast(VectorSpecies<F> s) {
            AbstractSpecies<F> species = (AbstractSpecies<F>) s;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorShuffle length and species length differ");
            int[] shuffleArray = toArray();
            // enum-switches don't optimize properly JDK-8161245
            switch (species.laneType.switchKey) {
            case LaneType.SK_BYTE:
                return new Byte64Vector.Byte64Shuffle(shuffleArray).check(species);
            case LaneType.SK_SHORT:
                return new Short64Vector.Short64Shuffle(shuffleArray).check(species);
            case LaneType.SK_INT:
                return new Int64Vector.Int64Shuffle(shuffleArray).check(species);
            case LaneType.SK_LONG:
                return new Long64Vector.Long64Shuffle(shuffleArray).check(species);
            case LaneType.SK_FLOAT:
                return new Float64Vector.Float64Shuffle(shuffleArray).check(species);
            case LaneType.SK_DOUBLE:
                return new Double64Vector.Double64Shuffle(shuffleArray).check(species);
            }

            // Should not reach here.
            throw new AssertionError(species);
        }

        // Compose shuffles: result lane i is this.reorder[shuffle.reorder[i]].
        @Override
        public Byte64Shuffle rearrange(VectorShuffle<Byte> shuffle) {
            Byte64Shuffle s = (Byte64Shuffle) shuffle;
            byte[] r = new byte[reorder.length];
            for (int i = 0; i < reorder.length; i++) {
                int ssi = s.reorder[i];
                r[i] = this.reorder[ssi];  // throws on exceptional index
            }
            return new Byte64Shuffle(r);
        }
    }

    // ================================================

    // Specialized low-level memory operations.

    @ForceInline
    @Override
    final
    ByteVector fromArray0(byte[] a, int offset) {
        return super.fromArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    ByteVector fromByteArray0(byte[] a, int offset) {
        return super.fromByteArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    ByteVector fromByteBuffer0(ByteBuffer bb, int offset) {
        return super.fromByteBuffer0Template(bb, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(byte[] a, int offset) {
        super.intoArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        super.intoByteArray0Template(a, offset);  // specialize
    }

    // End of specialized low-level memory operations.

    // ================================================

}