/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have
 * questions.
 */
package jdk.incubator.vector;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;
import java.nio.ReadOnlyBufferException;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.IntUnaryOperator;

import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.annotation.Stable;

import static jdk.incubator.vector.VectorIntrinsics.*;
import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

// Concrete 64-bit-shaped int vector: one template instantiation of the
// IntVector hierarchy.  Nearly every method is a thin @ForceInline wrapper
// that forwards to a *Template superclass method, re-stating the concrete
// types (VCLASS, Int64Mask.class, ...) so the JIT intrinsics see exact
// class constants and can generate specialized vector code.
@SuppressWarnings("cast")  // warning: redundant cast
final class Int64Vector extends IntVector {
    static final IntSpecies VSPECIES =
        (IntSpecies) IntVector.SPECIES_64;

    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    static final Class<Int64Vector> VCLASS = Int64Vector.class;

    static final int VSIZE = VSPECIES.vectorBitSize();

    static final int VLENGTH = VSPECIES.laneCount();

    static final Class<Integer> ETYPE = int.class;

    // The JVM expects to find the state here.
    private final int[] vec; // Don't access directly, use getElements() instead.

    Int64Vector(int[] v) {
        vec = v;
    }

    // For compatibility as Int64Vector::new,
    // stored into species.vectorFactory.
    Int64Vector(Object v) {
        this((int[]) v);
    }

    static final Int64Vector ZERO = new Int64Vector(new int[VLENGTH]);
    static final Int64Vector IOTA = new Int64Vector(VSPECIES.iotaArray());

    static {
        // Warm up a few species caches.
        // If we do this too much we will
        // get NPEs from bootstrap circularity.
        VSPECIES.dummyVector();
        VSPECIES.withLanes(LaneType.BYTE);
    }

    // Specialized extractors

    @ForceInline
    final @Override
    public IntSpecies vspecies() {
        // ISSUE: This should probably be a @Stable
        // field inside AbstractVector, rather than
        // a megamorphic method.
        return VSPECIES;
    }

    @ForceInline
    @Override
    public final Class<Integer> elementType() { return int.class; }

    @ForceInline
    @Override
    public final int elementSize() { return Integer.SIZE; }

    @ForceInline
    @Override
    public final VectorShape shape() { return VSHAPE; }

    @ForceInline
    @Override
    public final int length() { return VLENGTH; }

    @ForceInline
    @Override
    public final int bitSize() { return VSIZE; }

    @ForceInline
    @Override
    public final int byteSize() { return VSIZE / Byte.SIZE; }

    /*package-private*/
    @ForceInline
    final @Override
    int[] getElements() {
        // NOTE(review): maybeRebox presumably re-materializes the vector
        // after escape analysis / intrinsic bailout before exposing the
        // backing array — confirm against VectorIntrinsics.
        return VectorIntrinsics.maybeRebox(this).vec;
    }

    // Virtualized constructors

    @Override
    @ForceInline
    public final Int64Vector broadcast(int e) {
        return (Int64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    public final Int64Vector broadcast(long e) {
        return (Int64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    Int64Mask maskFromArray(boolean[] bits) {
        return new Int64Mask(bits);
    }

    @Override
    @ForceInline
    Int64Shuffle iotaShuffle() { return Int64Shuffle.IOTA; }

    @ForceInline
    Int64Shuffle iotaShuffle(int start) {
        // Shifted iota; wrapToRange keeps each index inside the lane count.
        return (Int64Shuffle)VectorIntrinsics.shuffleIota(ETYPE, Int64Shuffle.class, VSPECIES, VLENGTH, start, (val, l) -> new Int64Shuffle(i -> (VectorIntrinsics.wrapToRange(i + val, l))));
    }

    @Override
    @ForceInline
    Int64Shuffle shuffleFromBytes(byte[] reorder) { return new Int64Shuffle(reorder); }

    @Override
    @ForceInline
    Int64Shuffle shuffleFromArray(int[] indexes, int i) { return new Int64Shuffle(indexes, i); }

    @Override
    @ForceInline
    Int64Shuffle shuffleFromOp(IntUnaryOperator fn) { return new Int64Shuffle(fn); }

    // Make a vector of the same species but the given elements:

    @ForceInline
    final @Override
    Int64Vector vectorFactory(int[] vec) {
        return new Int64Vector(vec);
    }

    @ForceInline
    final @Override
    Byte64Vector asByteVectorRaw() {
        return (Byte64Vector) super.asByteVectorRawTemplate();  // specialize
    }

    @ForceInline
    final @Override
    AbstractVector<?> asVectorRaw(LaneType laneType) {
        return super.asVectorRawTemplate(laneType);  // specialize
    }

    // Unary operator

    final @Override
    Int64Vector uOp(FUnOp f) {
        return (Int64Vector) super.uOpTemplate(f);  // specialize
    }

    @ForceInline
    final @Override
    Int64Vector uOp(VectorMask<Integer> m, FUnOp f) {
        return (Int64Vector)
            super.uOpTemplate((Int64Mask)m, f);  // specialize
    }

    // Binary operator

    @ForceInline
    final @Override
    Int64Vector bOp(Vector<Integer> v, FBinOp f) {
        return (Int64Vector) super.bOpTemplate((Int64Vector)v, f);  // specialize
    }

    @ForceInline
    final @Override
    Int64Vector bOp(Vector<Integer> v,
                     VectorMask<Integer> m, FBinOp f) {
        return (Int64Vector)
            super.bOpTemplate((Int64Vector)v, (Int64Mask)m,
                              f);  // specialize
    }

    // Ternary operator

    @ForceInline
    final @Override
    Int64Vector tOp(Vector<Integer> v1, Vector<Integer> v2, FTriOp f) {
        return (Int64Vector)
            super.tOpTemplate((Int64Vector)v1, (Int64Vector)v2,
                              f);  // specialize
    }

    @ForceInline
    final @Override
    Int64Vector tOp(Vector<Integer> v1, Vector<Integer> v2,
                     VectorMask<Integer> m, FTriOp f) {
        return (Int64Vector)
            super.tOpTemplate((Int64Vector)v1, (Int64Vector)v2,
                              (Int64Mask)m, f);  // specialize
    }

    @ForceInline
    final @Override
    int rOp(int v, FBinOp f) {
        return super.rOpTemplate(v, f);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> convertShape(VectorOperators.Conversion<Integer,F> conv,
                           VectorSpecies<F> rsp, int part) {
        return super.convertShapeTemplate(conv, rsp, part);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> reinterpretShape(VectorSpecies<F> toSpecies, int part) {
        return super.reinterpretShapeTemplate(toSpecies, part);  // specialize
    }

    // Specialized algebraic operations:

    // The following definition forces a specialized version of this
    // crucial method into the v-table of this class. A call to add()
    // will inline to a call to lanewise(ADD,), at which point the JIT
    // intrinsic will have the opcode of ADD, plus all the metadata
    // for this particular class, enabling it to generate precise
    // code.
    //
    // There is probably no benefit to the JIT to specialize the
    // masked or broadcast versions of the lanewise method.

    @Override
    @ForceInline
    public Int64Vector lanewise(Unary op) {
        return (Int64Vector) super.lanewiseTemplate(op);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector lanewise(Binary op, Vector<Integer> v) {
        return (Int64Vector) super.lanewiseTemplate(op, v);  // specialize
    }

    /*package-private*/
    @Override
    @ForceInline Int64Vector
    lanewiseShift(VectorOperators.Binary op, int e) {
        return (Int64Vector) super.lanewiseShiftTemplate(op, e);  // specialize
    }

    /*package-private*/
    @Override
    @ForceInline
    public final
    Int64Vector
    lanewise(VectorOperators.Ternary op, Vector<Integer> v1, Vector<Integer> v2) {
        return (Int64Vector) super.lanewiseTemplate(op, v1, v2);  // specialize
    }

    @Override
    @ForceInline
    public final
    Int64Vector addIndex(int scale) {
        return (Int64Vector) super.addIndexTemplate(scale);  // specialize
    }

    // Type specific horizontal reductions

    @Override
    @ForceInline
    public final int reduceLanes(VectorOperators.Associative op) {
        return super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final int reduceLanes(VectorOperators.Associative op,
                                    VectorMask<Integer> m) {
        return super.reduceLanesTemplate(op, m);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op) {
        return (long) super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op,
                                        VectorMask<Integer> m) {
        return (long) super.reduceLanesTemplate(op, m);  // specialized
    }

    @Override
    @ForceInline
    public VectorShuffle<Integer> toShuffle() {
        // Copy lane values into a shuffle-index array.  The (int) cast is a
        // no-op for the int specialization; it is residue of the shared
        // template used for all lane types.
        int[] a = toArray();
        int[] sa = new int[a.length];
        for (int i = 0; i < a.length; i++) {
            sa[i] = (int) a[i];
        }
        return VectorShuffle.fromArray(VSPECIES, sa, 0);
    }

    // Specialized unary testing

    @Override
    @ForceInline
    public final Int64Mask test(Test op) {
        return super.testTemplate(Int64Mask.class, op);  // specialize
    }

    // Specialized comparisons

    @Override
    @ForceInline
    public final Int64Mask compare(Comparison op, Vector<Integer> v) {
        return super.compareTemplate(Int64Mask.class, op, v);  // specialize
    }

    @Override
    @ForceInline
    public final Int64Mask compare(Comparison op, int s) {
        return super.compareTemplate(Int64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public final Int64Mask compare(Comparison op, long s) {
        return super.compareTemplate(Int64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector blend(Vector<Integer> v, VectorMask<Integer> m) {
        return (Int64Vector)
            super.blendTemplate(Int64Mask.class,
                                (Int64Vector) v,
                                (Int64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector slice(int origin, Vector<Integer> v) {
        return (Int64Vector) super.sliceTemplate(origin, v);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector slice(int origin) {
        if ((origin < 0) || (origin >= VLENGTH)) {
            throw new ArrayIndexOutOfBoundsException("Index " + origin + " out of bounds for vector length " + VLENGTH);
        } else {
            // Shift lanes down by `origin` via a rotating iota rearrange,
            // then zero the top `origin` lanes using a LT mask.
            Int64Shuffle Iota = (Int64Shuffle)VectorShuffle.iota(VSPECIES, 0, 1, true);
            VectorMask<Integer> BlendMask = Iota.toVector().compare(VectorOperators.LT, (broadcast((int)(VLENGTH-origin))));
            Iota = (Int64Shuffle)VectorShuffle.iota(VSPECIES, origin, 1, true);
            return ZERO.blend(this.rearrange(Iota), BlendMask);
        }
    }

    @Override
    @ForceInline
    public Int64Vector unslice(int origin, Vector<Integer> w, int part) {
        return (Int64Vector) super.unsliceTemplate(origin, w, part);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector unslice(int origin, Vector<Integer> w, int part, VectorMask<Integer> m) {
        return (Int64Vector)
            super.unsliceTemplate(Int64Mask.class,
                                  origin, w, part,
                                  (Int64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector unslice(int origin) {
        if ((origin < 0) || (origin >= VLENGTH)) {
            throw new ArrayIndexOutOfBoundsException("Index " + origin + " out of bounds for vector length " + VLENGTH);
        } else {
            // Inverse of slice(origin): shift lanes up by `origin` (negative
            // iota start) and zero the bottom `origin` lanes via a GE mask.
            Int64Shuffle Iota = (Int64Shuffle)VectorShuffle.iota(VSPECIES, 0, 1, true);
            VectorMask<Integer> BlendMask = Iota.toVector().compare(VectorOperators.GE, (broadcast((int)(origin))));
            Iota = (Int64Shuffle)VectorShuffle.iota(VSPECIES, -origin, 1, true);
            return ZERO.blend(this.rearrange(Iota), BlendMask);
        }
    }

    @Override
    @ForceInline
    public Int64Vector rearrange(VectorShuffle<Integer> s) {
        return (Int64Vector)
            super.rearrangeTemplate(Int64Shuffle.class,
                                    (Int64Shuffle) s);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector rearrange(VectorShuffle<Integer> shuffle,
                                  VectorMask<Integer> m) {
        return (Int64Vector)
            super.rearrangeTemplate(Int64Shuffle.class,
                                    (Int64Shuffle) shuffle,
                                    (Int64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector rearrange(VectorShuffle<Integer> s,
                                  Vector<Integer> v) {
        return (Int64Vector)
            super.rearrangeTemplate(Int64Shuffle.class,
                                    (Int64Shuffle) s,
                                    (Int64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector selectFrom(Vector<Integer> v) {
        return (Int64Vector)
            super.selectFromTemplate((Int64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Int64Vector selectFrom(Vector<Integer> v,
                                   VectorMask<Integer> m) {
        return (Int64Vector)
            super.selectFromTemplate((Int64Vector) v,
                                     (Int64Mask) m);  // specialize
    }


    // Extract one lane; the fallback lambda widens to long because the
    // extract intrinsic traffics in long bits for all lane types.
    @Override
    public int lane(int i) {
        if (i < 0 || i >= VLENGTH) {
            throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
        return (int) VectorIntrinsics.extract(
                                VCLASS, ETYPE, VLENGTH,
                                this, i,
                                (vec, ix) -> {
                                    int[] vecarr = vec.getElements();
                                    return (long)vecarr[ix];
                                });
    }

    // Return a copy of this vector with lane i replaced by e
    // (the vector itself is immutable; the fallback clones the array).
    @Override
    public Int64Vector withLane(int i, int e) {
        if (i < 0 || i >= VLENGTH) {
            throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
        return VectorIntrinsics.insert(
                                VCLASS, ETYPE, VLENGTH,
                                this, i, (long)e,
                                (v, ix, bits) -> {
                                    int[] res = v.getElements().clone();
                                    res[ix] = (int)bits;
                                    return v.vectorFactory(res);
                                });
    }

    // Mask

    static final class Int64Mask extends AbstractMask<Integer> {

        private final boolean[] bits; // Don't access directly, use getBits() instead.

        public Int64Mask(boolean[] bits) {
            this(bits, 0);
        }

        public Int64Mask(boolean[] bits, int offset) {
            // Defensive copy of the lane-count-sized window starting at offset.
            boolean[] a = new boolean[vspecies().laneCount()];
            for (int i = 0; i < a.length; i++) {
                a[i] = bits[offset + i];
            }
            this.bits = a;
        }

        public Int64Mask(boolean val) {
            boolean[] bits = new boolean[vspecies().laneCount()];
            Arrays.fill(bits, val);
            this.bits = bits;
        }

        @ForceInline
        final @Override
        public IntSpecies vspecies() {
            // ISSUE: This should probably be a @Stable
            // field inside AbstractMask, rather than
            // a megamorphic method.
            return VSPECIES;
        }

        boolean[] getBits() {
            return VectorIntrinsics.maybeRebox(this).bits;
        }

        @Override
        Int64Mask uOp(MUnOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i]);
            }
            return new Int64Mask(res);
        }

        @Override
        Int64Mask bOp(VectorMask<Integer> m, MBinOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            boolean[] mbits = ((Int64Mask)m).getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i], mbits[i]);
            }
            return new Int64Mask(res);
        }

        @ForceInline
        @Override
        public final
        Int64Vector toVector() {
            return (Int64Vector) super.toVectorTemplate();  // specialize
        }

        // Reinterpret this mask for a different lane type of the same
        // 64-bit shape; lane count must match.
        @Override
        @ForceInline
        public <E> VectorMask<E> cast(VectorSpecies<E> s) {
            AbstractSpecies<E> species = (AbstractSpecies<E>) s;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorMask length and species length differ");
            boolean[] maskArray = toArray();
            // enum-switches don't optimize properly JDK-8161245
            switch (species.laneType.switchKey) {
            case LaneType.SK_BYTE:
                return new Byte64Vector.Byte64Mask(maskArray).check(species);
            case LaneType.SK_SHORT:
                return new Short64Vector.Short64Mask(maskArray).check(species);
            case LaneType.SK_INT:
                return new Int64Vector.Int64Mask(maskArray).check(species);
            case LaneType.SK_LONG:
                return new Long64Vector.Long64Mask(maskArray).check(species);
            case LaneType.SK_FLOAT:
                return new Float64Vector.Float64Mask(maskArray).check(species);
            case LaneType.SK_DOUBLE:
                return new Double64Vector.Double64Mask(maskArray).check(species);
            }

            // Should not reach here.
            throw new AssertionError(species);
        }

        // Unary operations

        @Override
        @ForceInline
        public Int64Mask not() {
            return (Int64Mask) VectorIntrinsics.unaryOp(
                                             VECTOR_OP_NOT, Int64Mask.class, int.class, VLENGTH,
                                             this,
                                             (m1) -> m1.uOp((i, a) -> !a));
        }

        // Binary operations

        @Override
        @ForceInline
        public Int64Mask and(VectorMask<Integer> mask) {
            Objects.requireNonNull(mask);
            Int64Mask m = (Int64Mask)mask;
            return VectorIntrinsics.binaryOp(VECTOR_OP_AND, Int64Mask.class, int.class, VLENGTH,
                                             this, m,
                                             (m1, m2) -> m1.bOp(m2, (i, a, b) -> a & b));
        }

        @Override
        @ForceInline
        public Int64Mask or(VectorMask<Integer> mask) {
            Objects.requireNonNull(mask);
            Int64Mask m = (Int64Mask)mask;
            return VectorIntrinsics.binaryOp(VECTOR_OP_OR, Int64Mask.class, int.class, VLENGTH,
                                             this, m,
                                             (m1, m2) -> m1.bOp(m2, (i, a, b) -> a | b));
        }

        // Reductions

        @Override
        @ForceInline
        public boolean anyTrue() {
            return VectorIntrinsics.test(BT_ne, Int64Mask.class, int.class, VLENGTH,
                                         this, this,
                                         (m, __) -> anyTrueHelper(((Int64Mask)m).getBits()));
        }

        @Override
        @ForceInline
        public boolean allTrue() {
            return VectorIntrinsics.test(BT_overflow, Int64Mask.class, int.class, VLENGTH,
                                         this, vspecies().maskAll(true),
                                         (m, __) -> allTrueHelper(((Int64Mask)m).getBits()));
        }

        /*package-private*/
        // Canonical all-true / all-false masks (shared singletons).
        static Int64Mask maskAll(boolean bit) {
            return bit ? TRUE_MASK : FALSE_MASK;
        }
        static final Int64Mask TRUE_MASK = new Int64Mask(true);
        static final Int64Mask FALSE_MASK = new Int64Mask(false);
    }

    // Shuffle

    static final class Int64Shuffle extends AbstractShuffle<Integer> {
        Int64Shuffle(byte[] reorder) {
            super(reorder);
        }

        public Int64Shuffle(int[] reorder) {
            super(reorder);
        }

        public Int64Shuffle(int[] reorder, int i) {
            super(reorder, i);
        }

        public Int64Shuffle(IntUnaryOperator fn) {
            super(fn);
        }

        @Override
        public IntSpecies vspecies() {
            return VSPECIES;
        }

        static {
            // There must be enough bits in the shuffle lanes to encode
            // VLENGTH valid indexes and VLENGTH exceptional ones.
            assert(VLENGTH < Byte.MAX_VALUE);
            assert(Byte.MIN_VALUE <= -VLENGTH);
        }
        static final Int64Shuffle IOTA = new Int64Shuffle(IDENTITY);

        @Override
        @ForceInline
        public Int64Vector toVector() {
            return VectorIntrinsics.shuffleToVector(VCLASS, ETYPE, Int64Shuffle.class, this, VLENGTH,
                                                    (s) -> ((Int64Vector)(((AbstractShuffle<Integer>)(s)).toVectorTemplate())));
        }

        // Reinterpret this shuffle for a different lane type of the same
        // 64-bit shape; lane count must match.
        @Override
        @ForceInline
        public <F> VectorShuffle<F> cast(VectorSpecies<F> s) {
            AbstractSpecies<F> species = (AbstractSpecies<F>) s;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorShuffle length and species length differ");
            int[] shuffleArray = toArray();
            // enum-switches don't optimize properly JDK-8161245
            switch (species.laneType.switchKey) {
            case LaneType.SK_BYTE:
                return new Byte64Vector.Byte64Shuffle(shuffleArray).check(species);
            case LaneType.SK_SHORT:
                return new Short64Vector.Short64Shuffle(shuffleArray).check(species);
            case LaneType.SK_INT:
                return new Int64Vector.Int64Shuffle(shuffleArray).check(species);
            case LaneType.SK_LONG:
                return new Long64Vector.Long64Shuffle(shuffleArray).check(species);
            case LaneType.SK_FLOAT:
                return new Float64Vector.Float64Shuffle(shuffleArray).check(species);
            case LaneType.SK_DOUBLE:
                return new Double64Vector.Double64Shuffle(shuffleArray).check(species);
            }

            // Should not reach here.
            throw new AssertionError(species);
        }

        // Compose two shuffles: result lane i is this.reorder[shuffle.reorder[i]].
        @Override
        public Int64Shuffle rearrange(VectorShuffle<Integer> shuffle) {
            Int64Shuffle s = (Int64Shuffle) shuffle;
            byte[] r = new byte[reorder.length];
            for (int i = 0; i < reorder.length; i++) {
                int ssi = s.reorder[i];
                r[i] = this.reorder[ssi];  // throws on exceptional index
            }
            return new Int64Shuffle(r);
        }
    }

    // ================================================

    // Specialized low-level memory operations.

    @ForceInline
    @Override
    final
    IntVector fromArray0(int[] a, int offset) {
        return super.fromArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    IntVector fromByteArray0(byte[] a, int offset) {
        return super.fromByteArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    IntVector fromByteBuffer0(ByteBuffer bb, int offset) {
        return super.fromByteBuffer0Template(bb, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(int[] a, int offset) {
        super.intoArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        super.intoByteArray0Template(a, offset);  // specialize
    }

    // End of specialized low-level memory operations.

    // ================================================

}