src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanWalker.java




   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 package org.graalvm.compiler.lir.alloc.trace.lsra;
  24 
  25 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue;
  26 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable;
  27 import static jdk.vm.ci.code.CodeUtil.isOdd;
  28 import static jdk.vm.ci.code.ValueUtil.asRegister;
  29 import static jdk.vm.ci.code.ValueUtil.isRegister;


  30 
  31 import java.util.ArrayList;
  32 import java.util.Arrays;
  33 import java.util.BitSet;
  34 import java.util.List;
  35 
  36 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters;
  37 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
  38 import org.graalvm.compiler.core.common.util.Util;
  39 import org.graalvm.compiler.debug.Debug;
  40 import org.graalvm.compiler.debug.GraalError;
  41 import org.graalvm.compiler.debug.Indent;
  42 import org.graalvm.compiler.lir.LIRInstruction;
  43 import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
  44 import org.graalvm.compiler.lir.StandardOp.LabelOp;
  45 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  46 import org.graalvm.compiler.lir.alloc.OutOfRegistersException;
  47 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.RegisterPriority;
  48 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.SpillState;
  49 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceLinearScanPhase.TraceLinearScan;

  50 
  51 import jdk.vm.ci.code.Register;
  52 import jdk.vm.ci.meta.Value;
  53 
  54 /**
  55  */
  56 final class TraceLinearScanWalker extends TraceIntervalWalker {
  57 
  58     private Register[] availableRegs;
  59 
  60     private final int[] usePos;
  61     private final int[] blockPos;
  62     private final BitSet isInMemory;
  63 
  64     private List<TraceInterval>[] spillIntervals;
  65 
  66     private TraceLocalMoveResolver moveResolver; // for ordering spill moves
  67 
  68     private int minReg;
  69 
  70     private int maxReg;
  71 
  72     /**
  73      * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used,
  74      * they can grow quite long. The maximum length observed was 45 (all numbers taken from a
  75      * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker
  76      * value, and allocate a "real" list only on demand in {@link #setUsePos}.
  77      */
  78     private static final List<TraceInterval> EMPTY_LIST = new ArrayList<>(0);
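The comment above describes a shared-marker idiom: every register slot initially points at one shared empty list, and a real list is allocated only once a register actually collects spill intervals (see setUsePos below). A minimal standalone sketch of the same idiom, with illustrative names that are not part of the Graal sources:

    import java.util.ArrayList;
    import java.util.List;

    final class LazyListsSketch {
        // Shared sentinel; nothing may ever be added to it.
        private static final List<String> EMPTY = new ArrayList<>(0);

        private final List<String>[] lists;

        @SuppressWarnings("unchecked")
        LazyListsSketch(int slots) {
            lists = (List<String>[]) new List<?>[slots];
            for (int i = 0; i < slots; i++) {
                lists[i] = EMPTY; // no per-slot allocation yet
            }
        }

        void add(int slot, String value) {
            List<String> list = lists[slot];
            if (list == EMPTY) {           // identity check, as in setUsePos
                list = new ArrayList<>(2); // allocate a real list on first use
                lists[slot] = list;
            }
            list.add(value);
        }
    }

Clearing the shared sentinel, as initUseLists does, is harmless because it is already empty; the only invariant is that elements are never added to it directly.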
  79 
  80     // accessors mapped to same functions in class LinearScan
  81     private int blockCount() {
  82         return allocator.blockCount();
  83     }
  84 
  85     private AbstractBlockBase<?> blockAt(int idx) {
  86         return allocator.blockAt(idx);
  87     }
  88 
  89     @SuppressWarnings("unused")
  90     private AbstractBlockBase<?> blockOfOpWithId(int opId) {
  91         return allocator.blockForId(opId);
  92     }
  93 
  94     TraceLinearScanWalker(TraceLinearScan allocator, FixedInterval unhandledFixedFirst, TraceInterval unhandledAnyFirst) {
  95         super(allocator, unhandledFixedFirst, unhandledAnyFirst);
  96 
  97         moveResolver = allocator.createMoveResolver();
  98         int numRegs = allocator.getRegisters().size();
  99         spillIntervals = Util.uncheckedCast(new List<?>[numRegs]);
 100         for (int i = 0; i < numRegs; i++) {
 101             spillIntervals[i] = EMPTY_LIST;
 102         }
 103         usePos = new int[numRegs];
 104         blockPos = new int[numRegs];
 105         isInMemory = new BitSet(numRegs);
 106     }
 107 
 108     private void initUseLists(boolean onlyProcessUsePos) {
 109         for (Register register : availableRegs) {
 110             int i = register.number;
 111             usePos[i] = Integer.MAX_VALUE;
 112 
 113             if (!onlyProcessUsePos) {
 114                 blockPos[i] = Integer.MAX_VALUE;
 115                 spillIntervals[i].clear();
 116                 isInMemory.clear(i);
 117             }
 118         }
 119     }
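The tables initialized above drive register selection: usePos[reg] == Integer.MAX_VALUE means the register is free for the rest of the trace, smaller values record where it is next needed, and excludeFromUse forces it to 0. The selection logic itself lies outside this hunk; as a rough, hypothetical sketch of how such a table is typically consumed (the real walker also consults blockPos, fixed intervals and register hints):

    static int pickRegister(int[] usePos, int[] candidateRegNums) {
        int bestReg = -1;
        int bestFreeUntil = 0;
        for (int reg : candidateRegNums) {
            if (usePos[reg] > bestFreeUntil) { // prefer the register that stays free longest
                bestFreeUntil = usePos[reg];
                bestReg = reg;
            }
        }
        return bestReg; // -1 if every candidate is blocked (usePos == 0)
    }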


 130         return reg >= minRegisterNumber() && reg <= maxRegisterNumber();
 131     }
 132 
 133     private void excludeFromUse(IntervalHint i) {
 134         Value location = i.location();
 135         int i1 = asRegister(location).number;
 136         if (isRegisterInRange(i1)) {
 137             usePos[i1] = 0;
 138         }
 139     }
 140 
 141     private void setUsePos(TraceInterval interval, int usePos, boolean onlyProcessUsePos) {
 142         if (usePos != -1) {
 143             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
 144             int i = asRegister(interval.location()).number;
 145             if (isRegisterInRange(i)) {
 146                 if (this.usePos[i] > usePos) {
 147                     this.usePos[i] = usePos;
 148                 }
 149                 if (!onlyProcessUsePos) {
 150                     List<TraceInterval> list = spillIntervals[i];
 151                     if (list == EMPTY_LIST) {
 152                         list = new ArrayList<>(2);
 153                         spillIntervals[i] = list;
 154                     }
 155                     list.add(interval);
 156                     // set is in memory flag
 157                     if (interval.inMemoryAt(currentPosition)) {
 158                         isInMemory.set(i);
 159                     }
 160                 }
 161             }
 162         }
 163     }
 164 
 165     private void setUsePos(FixedInterval interval, int usePos, boolean onlyProcessUsePos) {
 166         assert onlyProcessUsePos;
 167         if (usePos != -1) {
 168             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
 169             int i = asRegister(interval.location()).number;
 170             if (isRegisterInRange(i)) {


 263             return opId - 2;
 264         }
 265         assert toBlock.getPredecessorCount() == 1 : String.format("Critical Edge? %s->%s", fromBlock, toBlock);
 266         // insert move in successor
 267         return opId + 2;
 268     }
 269 
 270     private void insertMove(int operandId, TraceInterval srcIt, TraceInterval dstIt) {
 271         // output all moves here. When source and target are equal, the move is
 272         // optimized away later in assignRegNums
 273 
 274         int opId = (operandId + 1) & ~1;
 275         AbstractBlockBase<?> opBlock = allocator.blockForId(opId);
 276         assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary";
 277 
 278         // calculate index of instruction inside instruction list of current block
 279         // the minimal index (for a block with no spill moves) can be calculated because the
 280         // numbering of instructions is known.
 281         // When the block already contains spill moves, the index must be increased until the
 282         // correct index is reached.
 283         List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(opBlock);
 284         int index = (opId - instructions.get(0).id()) >> 1;
 285         assert instructions.get(index).id() <= opId : "error in calculation";
 286 
 287         while (instructions.get(index).id() != opId) {
 288             index++;
 289             assert 0 <= index && index < instructions.size() : "index out of bounds";
 290         }
 291         assert 1 <= index && index < instructions.size() : "index out of bounds";
 292         assert instructions.get(index).id() == opId : "error in calculation";
 293 
 294         // insert new instruction before instruction at position index
 295         moveResolver.moveInsertPosition(instructions, index);
 296         moveResolver.addMapping(srcIt, dstIt);
 297     }
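The index computation above relies on the LIR numbering scheme: within a block, instructions carry consecutive even ids, so rounding an operand position up to the next even number yields an instruction id, and halving its distance from the block's first id gives the minimal list index. A worked example with made-up ids:

    // Assumed block whose instructions carry ids 16, 18, 20, 22.
    static void indexArithmeticDemo() {
        int firstId = 16;
        int operandId = 19;                // odd position between instructions 18 and 20
        int opId = (operandId + 1) & ~1;   // round up to the next even id -> 20
        int index = (opId - firstId) >> 1; // (20 - 16) / 2 -> minimal list index 2
        assert opId == 20 && index == 2;
        // Spill moves inserted earlier shift later instructions to higher indices, which is
        // why insertMove then walks forward until instructions.get(index).id() == opId.
    }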
 298 
 299     private int findOptimalSplitPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
 300         int fromBlockNr = minBlock.getLinearScanNumber();
 301         int toBlockNr = maxBlock.getLinearScanNumber();
 302 
 303         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";


 388 
 389             assert interval.from() < minSplitPos : "cannot split at start of interval";
 390             assert currentPosition < minSplitPos : "cannot split before current position";
 391             assert minSplitPos <= maxSplitPos : "invalid order";
 392             assert maxSplitPos <= interval.to() : "cannot split after end of interval";
 393 
 394             final int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true);
 395 
 396             if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
 397                 // the split position would be just before the end of the interval
  398                 // -> no split at all necessary
 399                 if (Debug.isLogEnabled()) {
 400                     Debug.log("no split necessary because optimal split position is at end of interval");
 401                 }
 402                 return;
 403             }
 404             // must calculate this before the actual split is performed and before split position is
 405             // moved to odd opId
 406             final int optimalSplitPosFinal;
 407             boolean blockBegin = allocator.isBlockBegin(optimalSplitPos);


 408             if (blockBegin) {
 409                 assert (optimalSplitPos & 1) == 0 : "Block begins must be even: " + optimalSplitPos;
  410                 // move position after the label (odd opId)
 411                 optimalSplitPosFinal = optimalSplitPos + 1;
 412             } else {
 413                 // move position before actual instruction (odd opId)
 414                 optimalSplitPosFinal = (optimalSplitPos - 1) | 1;
 415             }
 416 
  417             // TODO (je) better define what minSplitPos and maxSplitPos mean.
 418             assert minSplitPos <= optimalSplitPosFinal && optimalSplitPosFinal <= maxSplitPos || minSplitPos == maxSplitPos && optimalSplitPosFinal == minSplitPos - 1 : "out of range";
 419             assert optimalSplitPosFinal <= interval.to() : "cannot split after end of interval";
 420             assert optimalSplitPosFinal > interval.from() : "cannot split at start of interval";
 421 
 422             if (Debug.isLogEnabled()) {
 423                 Debug.log("splitting at position %d", optimalSplitPosFinal);
 424             }
 425             assert optimalSplitPosFinal > currentPosition : "Can not split interval " + interval + " at current position: " + currentPosition;
 426 
 427             // was:
 428             // assert isBlockBegin || ((optimalSplitPos1 & 1) == 1) :
 429             // "split pos must be odd when not on block boundary";
 430             // assert !isBlockBegin || ((optimalSplitPos1 & 1) == 0) :
 431             // "split pos must be even on block boundary";
 432             assert (optimalSplitPosFinal & 1) == 1 : "split pos must be odd";
 433 
 434             // TODO (je) duplicate code. try to fold
 435             if (optimalSplitPosFinal == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
 436                 // the split position would be just before the end of the interval
  437                 // -> no split at all necessary
 438                 if (Debug.isLogEnabled()) {
 439                     Debug.log("no split necessary because optimal split position is at end of interval");
 440                 }
 441                 return;
 442             }
 443             TraceInterval splitPart = interval.split(optimalSplitPosFinal, allocator);
 444 
 445             boolean moveNecessary = true;
 446             splitPart.setInsertMoveWhenActivated(moveNecessary);
 447 
 448             assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position";
 449             unhandledAnyList.addToListSortedByStartAndUsePositions(splitPart);
 450 
 451             if (Debug.isLogEnabled()) {
 452                 Debug.log("left interval  %s: %s", moveNecessary ? "      " : "", interval.logString());
 453                 Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString());
 454             }
 455         }
 456     }
 457 
 458     // split an interval at the optimal position between minSplitPos and
 459     // maxSplitPos in two parts:
 460     // 1) the left part has already a location assigned
 461     // 2) the right part is always on the stack and therefore ignored in further processing
 462     @SuppressWarnings("try")
 463     private void splitForSpilling(TraceInterval interval) {
 464         // calculate allowed range of splitting position
 465         int maxSplitPos = currentPosition;


 554                     assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild";
 555                     spilledPart.makeCurrentSplitChild();
 556 
 557                     if (Debug.isLogEnabled()) {
 558                         Debug.log("left interval: %s", interval.logString());
 559                         Debug.log("spilled interval   : %s", spilledPart.logString());
 560                     }
 561                 }
 562             }
 563         }
 564     }
 565 
 566     /**
 567      * Change spill state of an interval.
 568      *
 569      * Note: called during register allocation.
 570      *
 571      * @param spillPos position of the spill
 572      */
 573     private void changeSpillState(TraceInterval interval, int spillPos) {
 574         if (TraceLinearScanPhase.Options.LIROptTraceRAEliminateSpillMoves.getValue()) {
 575             switch (interval.spillState()) {
 576                 case NoSpillStore:
 577                     final int minSpillPos = interval.spillDefinitionPos();
 578                     final int maxSpillPost = spillPos;
 579 
 580                     final int optimalSpillPos = findOptimalSpillPos(minSpillPos, maxSpillPost);
 581 
 582                     // assert !allocator.isBlockBegin(optimalSpillPos);
 583                     assert !allocator.isBlockEnd(optimalSpillPos);
 584                     assert (optimalSpillPos & 1) == 0 : "Spill pos must be even";

 585 
 586                     interval.setSpillDefinitionPos(optimalSpillPos);
 587                     interval.setSpillState(SpillState.SpillStore);
 588                     break;
 589                 case SpillStore:
 590                 case StartInMemory:
 591                 case NoOptimization:
 592                 case NoDefinitionFound:
 593                     // nothing to do
 594                     break;
 595 
 596                 default:
 597                     throw GraalError.shouldNotReachHere("other states not allowed at this time");
 598             }
 599         } else {
 600             interval.setSpillState(SpillState.NoOptimization);
 601         }
 602     }
  603 
 604     /**
 605      * @param minSpillPos minimal spill position
 606      * @param maxSpillPos maximal spill position
 607      */
 608     private int findOptimalSpillPos(int minSpillPos, int maxSpillPos) {
 609         int optimalSpillPos = findOptimalSpillPos0(minSpillPos, maxSpillPos) & (~1);
 610         if (Debug.isLogEnabled()) {
 611             Debug.log("optimal spill position: %d", optimalSpillPos);
 612         }
 613         return optimalSpillPos;
 614     }
 615 
 616     private int findOptimalSpillPos0(int minSpillPos, int maxSpillPos) {
 617         if (minSpillPos == maxSpillPos) {
 618             // trivial case, no optimization of split position possible
 619             if (Debug.isLogEnabled()) {
 620                 Debug.log("min-pos and max-pos are equal, no optimization possible");
 621             }
 622             return minSpillPos;
 623 


 638 
 639         }
 640         // search optimal block boundary between minSplitPos and maxSplitPos
 641         if (Debug.isLogEnabled()) {
 642             Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
 643         }
 644 
 645         // currently using the same heuristic as for splitting
 646         return findOptimalSpillPos(minBlock, maxBlock, maxSpillPos);
 647     }
 648 
 649     private int findOptimalSpillPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
 650         int fromBlockNr = minBlock.getLinearScanNumber();
 651         int toBlockNr = maxBlock.getLinearScanNumber();
 652 
 653         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
 654         assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
 655         assert fromBlockNr < toBlockNr : "must cross block boundary";
 656 
 657         /*
 658          * Try to split at end of maxBlock. If this would be after maxSplitPos, then use the begin
 659          * of maxBlock. We use last instruction -2 because we want to insert the move before the
 660          * block end op.
 661          */
 662         int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) - 2;
 663         if (optimalSplitPos > maxSplitPos) {
 664             optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock);
 665         }
 666 
 667         // minimal block probability
 668         double minProbability = maxBlock.probability();
 669         for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
 670             AbstractBlockBase<?> cur = blockAt(i);
 671 
 672             if (cur.probability() < minProbability) {
 673                 // Block with lower probability found. Split at the end of this block.


 674                 minProbability = cur.probability();
  675             optimalSplitPos = allocator.getLastLirInstructionId(cur) - 2;
 676             }
 677         }
 678         assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) || allocator.isBlockEnd(optimalSplitPos + 2) : "algorithm must move split pos to block boundary";
 679 

 680         return optimalSplitPos;
 681     }
 682 
 683     /**
 684      * This is called for every interval that is assigned to a stack slot.
 685      */
 686     private static void handleSpillSlot(TraceInterval interval) {
 687         assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval;
 688         // Do nothing. Stack slots are not processed in this implementation.
 689     }
 690 
 691     private void splitStackInterval(TraceInterval interval) {
 692         int minSplitPos = currentPosition + 1;
 693         int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());
 694 
 695         splitBeforeUsage(interval, minSplitPos, maxSplitPos);
 696     }
 697 
 698     private void splitWhenPartialRegisterAvailable(TraceInterval interval, int registerAvailableUntil) {
 699         int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);




   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 package org.graalvm.compiler.lir.alloc.trace.lsra;
  24 


  25 import static jdk.vm.ci.code.CodeUtil.isOdd;
  26 import static jdk.vm.ci.code.ValueUtil.asRegister;
  27 import static jdk.vm.ci.code.ValueUtil.isRegister;
  28 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue;
  29 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable;
  30 
  31 import java.util.ArrayList;
  32 import java.util.Arrays;
  33 import java.util.BitSet;

  34 
  35 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters;
  36 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
  37 import org.graalvm.compiler.core.common.util.Util;
  38 import org.graalvm.compiler.debug.Debug;
  39 import org.graalvm.compiler.debug.GraalError;
  40 import org.graalvm.compiler.debug.Indent;
  41 import org.graalvm.compiler.lir.LIRInstruction;
  42 import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
  43 import org.graalvm.compiler.lir.StandardOp.LabelOp;
  44 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  45 import org.graalvm.compiler.lir.alloc.OutOfRegistersException;
  46 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.RegisterPriority;
  47 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.SpillState;
  48 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceLinearScanPhase.TraceLinearScan;
  49 import org.graalvm.compiler.lir.ssa.SSAUtil;
  50 
  51 import jdk.vm.ci.code.Register;
  52 import jdk.vm.ci.meta.Value;
  53 
  54 /**
  55  */
  56 final class TraceLinearScanWalker extends TraceIntervalWalker {
  57 
  58     private Register[] availableRegs;
  59 
  60     private final int[] usePos;
  61     private final int[] blockPos;
  62     private final BitSet isInMemory;
  63 
  64     private final ArrayList<TraceInterval>[] spillIntervals;
  65 
  66     private TraceLocalMoveResolver moveResolver; // for ordering spill moves
  67 
  68     private int minReg;
  69 
  70     private int maxReg;
  71 
  72     /**
  73      * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used,
  74      * they can grow quite long. The maximum length observed was 45 (all numbers taken from a
  75      * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker
  76      * value, and allocate a "real" list only on demand in {@link #setUsePos}.
  77      */
  78     private static final ArrayList<TraceInterval> EMPTY_LIST = new ArrayList<>(0);
  79 
  80     // accessors mapped to same functions in class LinearScan
  81     private int blockCount() {
  82         return allocator.blockCount();
  83     }
  84 
  85     private AbstractBlockBase<?> blockAt(int idx) {
  86         return allocator.blockAt(idx);
  87     }
  88 
  89     @SuppressWarnings("unused")
  90     private AbstractBlockBase<?> blockOfOpWithId(int opId) {
  91         return allocator.blockForId(opId);
  92     }
  93 
  94     TraceLinearScanWalker(TraceLinearScan allocator, FixedInterval unhandledFixedFirst, TraceInterval unhandledAnyFirst) {
  95         super(allocator, unhandledFixedFirst, unhandledAnyFirst);
  96 
  97         moveResolver = allocator.createMoveResolver();
  98         int numRegs = allocator.getRegisters().size();
  99         spillIntervals = Util.uncheckedCast(new ArrayList<?>[numRegs]);
 100         for (int i = 0; i < numRegs; i++) {
 101             spillIntervals[i] = EMPTY_LIST;
 102         }
 103         usePos = new int[numRegs];
 104         blockPos = new int[numRegs];
 105         isInMemory = new BitSet(numRegs);
 106     }
 107 
 108     private void initUseLists(boolean onlyProcessUsePos) {
 109         for (Register register : availableRegs) {
 110             int i = register.number;
 111             usePos[i] = Integer.MAX_VALUE;
 112 
 113             if (!onlyProcessUsePos) {
 114                 blockPos[i] = Integer.MAX_VALUE;
 115                 spillIntervals[i].clear();
 116                 isInMemory.clear(i);
 117             }
 118         }
 119     }


 130         return reg >= minRegisterNumber() && reg <= maxRegisterNumber();
 131     }
 132 
 133     private void excludeFromUse(IntervalHint i) {
 134         Value location = i.location();
 135         int i1 = asRegister(location).number;
 136         if (isRegisterInRange(i1)) {
 137             usePos[i1] = 0;
 138         }
 139     }
 140 
 141     private void setUsePos(TraceInterval interval, int usePos, boolean onlyProcessUsePos) {
 142         if (usePos != -1) {
 143             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
 144             int i = asRegister(interval.location()).number;
 145             if (isRegisterInRange(i)) {
 146                 if (this.usePos[i] > usePos) {
 147                     this.usePos[i] = usePos;
 148                 }
 149                 if (!onlyProcessUsePos) {
 150                     ArrayList<TraceInterval> list = spillIntervals[i];
 151                     if (list == EMPTY_LIST) {
 152                         list = new ArrayList<>(2);
 153                         spillIntervals[i] = list;
 154                     }
 155                     list.add(interval);
 156                     // set is in memory flag
 157                     if (interval.inMemoryAt(currentPosition)) {
 158                         isInMemory.set(i);
 159                     }
 160                 }
 161             }
 162         }
 163     }
 164 
 165     private void setUsePos(FixedInterval interval, int usePos, boolean onlyProcessUsePos) {
 166         assert onlyProcessUsePos;
 167         if (usePos != -1) {
 168             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
 169             int i = asRegister(interval.location()).number;
 170             if (isRegisterInRange(i)) {


 263             return opId - 2;
 264         }
 265         assert toBlock.getPredecessorCount() == 1 : String.format("Critical Edge? %s->%s", fromBlock, toBlock);
 266         // insert move in successor
 267         return opId + 2;
 268     }
 269 
 270     private void insertMove(int operandId, TraceInterval srcIt, TraceInterval dstIt) {
 271         // output all moves here. When source and target are equal, the move is
 272         // optimized away later in assignRegNums
 273 
 274         int opId = (operandId + 1) & ~1;
 275         AbstractBlockBase<?> opBlock = allocator.blockForId(opId);
 276         assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary";
 277 
 278         // calculate index of instruction inside instruction list of current block
 279         // the minimal index (for a block with no spill moves) can be calculated because the
 280         // numbering of instructions is known.
 281         // When the block already contains spill moves, the index must be increased until the
 282         // correct index is reached.
 283         ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(opBlock);
 284         int index = (opId - instructions.get(0).id()) >> 1;
 285         assert instructions.get(index).id() <= opId : "error in calculation";
 286 
 287         while (instructions.get(index).id() != opId) {
 288             index++;
 289             assert 0 <= index && index < instructions.size() : "index out of bounds";
 290         }
 291         assert 1 <= index && index < instructions.size() : "index out of bounds";
 292         assert instructions.get(index).id() == opId : "error in calculation";
 293 
 294         // insert new instruction before instruction at position index
 295         moveResolver.moveInsertPosition(instructions, index);
 296         moveResolver.addMapping(srcIt, dstIt);
 297     }
 298 
 299     private int findOptimalSplitPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
 300         int fromBlockNr = minBlock.getLinearScanNumber();
 301         int toBlockNr = maxBlock.getLinearScanNumber();
 302 
 303         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";


 388 
 389             assert interval.from() < minSplitPos : "cannot split at start of interval";
 390             assert currentPosition < minSplitPos : "cannot split before current position";
 391             assert minSplitPos <= maxSplitPos : "invalid order";
 392             assert maxSplitPos <= interval.to() : "cannot split after end of interval";
 393 
 394             final int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true);
 395 
 396             if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
 397                 // the split position would be just before the end of the interval
  398                 // -> no split at all necessary
 399                 if (Debug.isLogEnabled()) {
 400                     Debug.log("no split necessary because optimal split position is at end of interval");
 401                 }
 402                 return;
 403             }
 404             // must calculate this before the actual split is performed and before split position is
 405             // moved to odd opId
 406             final int optimalSplitPosFinal;
 407             boolean blockBegin = allocator.isBlockBegin(optimalSplitPos);
 408             assert blockBegin || !allocator.isBlockBegin(optimalSplitPos - 1);
 409             boolean moveNecessary = !blockBegin;
 410             if (blockBegin) {
 411                 optimalSplitPosFinal = optimalSplitPos;


 412             } else {
 413                 // move position before actual instruction (odd opId)
 414                 optimalSplitPosFinal = (optimalSplitPos - 1) | 1;
 415             }
 416 
  417             // TODO (je) better define what minSplitPos and maxSplitPos mean.
 418             assert minSplitPos <= optimalSplitPosFinal && optimalSplitPosFinal <= maxSplitPos || minSplitPos == maxSplitPos && optimalSplitPosFinal == minSplitPos - 1 : "out of range";
 419             assert optimalSplitPosFinal <= interval.to() : "cannot split after end of interval";
 420             assert optimalSplitPosFinal > interval.from() : "cannot split at start of interval";
 421 
 422             if (Debug.isLogEnabled()) {
 423                 Debug.log("splitting at position %d", optimalSplitPosFinal);
 424             }
 425             assert optimalSplitPosFinal > currentPosition : "Can not split interval " + interval + " at current position: " + currentPosition;
 426 
 427             assert blockBegin || ((optimalSplitPosFinal & 1) == 1) : "split pos must be odd when not on block boundary";
 428             assert !blockBegin || ((optimalSplitPosFinal & 1) == 0) : "split pos must be even on block boundary";




 429 
 430             // TODO (je) duplicate code. try to fold
 431             if (optimalSplitPosFinal == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
 432                 // the split position would be just before the end of the interval
  433                 // -> no split at all necessary
 434                 if (Debug.isLogEnabled()) {
 435                     Debug.log("no split necessary because optimal split position is at end of interval");
 436                 }
 437                 return;
 438             }
 439             TraceInterval splitPart = interval.split(optimalSplitPosFinal, allocator);
 440 

 441             splitPart.setInsertMoveWhenActivated(moveNecessary);
 442 
 443             assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position";
 444             unhandledAnyList.addToListSortedByStartAndUsePositions(splitPart);
 445 
 446             if (Debug.isLogEnabled()) {
 447                 Debug.log("left interval  %s: %s", moveNecessary ? "      " : "", interval.logString());
 448                 Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString());
 449             }
 450         }
 451     }
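In the branch above, a split that falls on a block begin keeps the even position and needs no extra move (moveNecessary is false); any other position is rounded down to the odd id just before the instruction. A small sketch of the rounding, with illustrative values:

    static int roundSplitPos(int optimalSplitPos, boolean blockBegin) {
        if (blockBegin) {
            return optimalSplitPos;       // block begins are even and are kept as-is
        }
        return (optimalSplitPos - 1) | 1; // round down to odd: 34 -> 33, 35 -> 35
    }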
 452 
 453     // split an interval at the optimal position between minSplitPos and
 454     // maxSplitPos in two parts:
 455     // 1) the left part has already a location assigned
 456     // 2) the right part is always on the stack and therefore ignored in further processing
 457     @SuppressWarnings("try")
 458     private void splitForSpilling(TraceInterval interval) {
 459         // calculate allowed range of splitting position
 460         int maxSplitPos = currentPosition;


 549                     assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild";
 550                     spilledPart.makeCurrentSplitChild();
 551 
 552                     if (Debug.isLogEnabled()) {
 553                         Debug.log("left interval: %s", interval.logString());
 554                         Debug.log("spilled interval   : %s", spilledPart.logString());
 555                     }
 556                 }
 557             }
 558         }
 559     }
 560 
 561     /**
 562      * Change spill state of an interval.
 563      *
 564      * Note: called during register allocation.
 565      *
 566      * @param spillPos position of the spill
 567      */
 568     private void changeSpillState(TraceInterval interval, int spillPos) {
 569         if (TraceLinearScanPhase.Options.LIROptTraceRAEliminateSpillMoves.getValue(allocator.getOptions())) {
 570             switch (interval.spillState()) {
 571                 case NoSpillStore:
 572                     final int minSpillPos = calculateMinSpillPos(interval.spillDefinitionPos(), spillPos);
 573                     final int maxSpillPos = calculateMaxSpillPos(minSpillPos, spillPos);
 574 
 575                     final int optimalSpillPos = findOptimalSpillPos(minSpillPos, maxSpillPos);
 576 
 577                     /* Cannot spill at block begin since it interferes with move resolution. */
 578                     assert isNotBlockBeginOrMerge(optimalSpillPos) : "Spill pos at block begin: " + optimalSpillPos;
 579                     assert !allocator.isBlockEnd(optimalSpillPos) : "Spill pos at block end: " + optimalSpillPos;
 580                     assert (optimalSpillPos & 1) == 0 : "Spill pos must be even " + optimalSpillPos;
 581 
 582                     interval.setSpillDefinitionPos(optimalSpillPos);
 583                     interval.setSpillState(SpillState.SpillStore);
 584                     break;
 585                 case SpillStore:
 586                 case StartInMemory:
 587                 case NoOptimization:
 588                 case NoDefinitionFound:
 589                     // nothing to do
 590                     break;
 591 
 592                 default:
 593                     throw GraalError.shouldNotReachHere("other states not allowed at this time");
 594             }
 595         } else {
 596             interval.setSpillState(SpillState.NoOptimization);
 597         }
 598     }
 599 
 600     private int calculateMinSpillPos(int spillDefinitionPos, int spillPos) {
 601         int spillDefinitionPosEven = spillDefinitionPos & ~1;
 602         if (spillDefinitionPosEven == 0 || !allocator.isBlockBegin(spillDefinitionPosEven) || spillDefinitionPos == spillPos) {
  603             assert !allocator.isBlockEnd(spillDefinitionPosEven) : "Definition at block end? " + spillDefinitionPos;
 604             return spillDefinitionPos;
 605         }
 606         assert allocator.isBlockBegin(spillDefinitionPosEven);
 607         if (SSAUtil.isMerge(allocator.blockForId(spillDefinitionPos))) {
 608             /* Spill at merge are OK since there will be no resolution moves. */
 609             return spillDefinitionPos;
 610         }
 611         int minSpillPos = spillDefinitionPosEven + 2;
 612         while (allocator.isBlockEnd(minSpillPos)) {
 613             // +2 is block begin, +4 is the instruction afterwards
 614             minSpillPos += 4;
 615         }
  616         assert minSpillPos <= spillPos : String.format("No minSpillPos found. defPos: %d, spillPos: %d, minSpillPos: %d", spillDefinitionPos, spillPos, minSpillPos);
 617         return minSpillPos;
 618     }
 619 
 620     private int calculateMaxSpillPos(final int minSpillPos, int spillPos) {
 621         int spillPosEven = spillPos & ~1;
 622         if (spillPosEven == 0) {
 623             return spillPos;
 624         }
 625         if ((minSpillPos & ~1) == spillPosEven) {
 626             assert isNotBlockBeginOrMerge(spillPos);
 627             return spillPos;
 628         }
 629         int maxSpillPos;
 630         /* Move away from block end. */
 631         if (allocator.isBlockEnd(spillPosEven)) {
 632             /* Block end. Use instruction before. */
 633             maxSpillPos = spillPosEven - 2;
 634         } else if (allocator.isBlockBegin(spillPosEven)) {
 635             /* Block begin. Use instruction before previous block end. */
 636             maxSpillPos = spillPosEven - 4;
 637         } else {
 638             return spillPos;
 639         }
 640         assert !allocator.isBlockEnd(maxSpillPos) : "Can no longer be a block end! " + maxSpillPos;
 641 
 642         /* Skip block begins. */
 643         while (allocator.isBlockBegin(maxSpillPos) && maxSpillPos > minSpillPos) {
 644             // -2 is block end, -4 is the instruction before
 645             maxSpillPos -= 4;
 646         }
 647         assert minSpillPos <= maxSpillPos;
 648         return maxSpillPos;
 649     }
 650 
 651     private boolean isNotBlockBeginOrMerge(int spillPos) {
 652         int spillPosEven = spillPos & ~1;
 653         return spillPosEven == 0 || !allocator.isBlockBegin(spillPosEven) || SSAUtil.isMerge(allocator.blockForId(spillPosEven));
 654     }
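The helpers above keep the candidate spill range away from block boundaries: a spill store at a block begin (unless it is a merge) would interfere with move resolution, and one at the block end op cannot be inserted at all. A worked example under an assumed numbering where the label/block begin has id 16 and the block end op has id 24:

    static void spillRangeDemo() {
        int spillDefinitionPos = 16;                     // definition right at the label
        int minSpillPos = (spillDefinitionPos & ~1) + 2; // 18: first position after the label
        int spillPos = 24;                               // requested spill at the block end op
        int maxSpillPos = (spillPos & ~1) - 2;           // 22: instruction before the block end
        assert minSpillPos == 18 && maxSpillPos == 22;
    }

The real helpers additionally skip over empty blocks and accept merges, as shown in the code above.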
 655 
 656     /**
 657      * @param minSpillPos minimal spill position
 658      * @param maxSpillPos maximal spill position
 659      */
 660     private int findOptimalSpillPos(int minSpillPos, int maxSpillPos) {
 661         int optimalSpillPos = findOptimalSpillPos0(minSpillPos, maxSpillPos) & (~1);
 662         if (Debug.isLogEnabled()) {
 663             Debug.log("optimal spill position: %d", optimalSpillPos);
 664         }
 665         return optimalSpillPos;
 666     }
 667 
 668     private int findOptimalSpillPos0(int minSpillPos, int maxSpillPos) {
 669         if (minSpillPos == maxSpillPos) {
 670             // trivial case, no optimization of split position possible
 671             if (Debug.isLogEnabled()) {
 672                 Debug.log("min-pos and max-pos are equal, no optimization possible");
 673             }
 674             return minSpillPos;
 675 


 690 
 691         }
 692         // search optimal block boundary between minSplitPos and maxSplitPos
 693         if (Debug.isLogEnabled()) {
 694             Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
 695         }
 696 
 697         // currently using the same heuristic as for splitting
 698         return findOptimalSpillPos(minBlock, maxBlock, maxSpillPos);
 699     }
 700 
 701     private int findOptimalSpillPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
 702         int fromBlockNr = minBlock.getLinearScanNumber();
 703         int toBlockNr = maxBlock.getLinearScanNumber();
 704 
 705         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
 706         assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
 707         assert fromBlockNr < toBlockNr : "must cross block boundary";
 708 
 709         /*
 710          * Try to split at end of maxBlock. We use last instruction -2 because we want to insert the
 711          * move before the block end op. If this would be after maxSplitPos, then use the
 712          * maxSplitPos.
 713          */
 714         int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) - 2;
 715         if (optimalSplitPos > maxSplitPos) {
 716             optimalSplitPos = maxSplitPos;
 717         }
 718 
 719         // minimal block probability
 720         double minProbability = maxBlock.probability();
 721         for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
 722             AbstractBlockBase<?> cur = blockAt(i);
 723 
 724             if (cur.probability() < minProbability) {
 725                 // Block with lower probability found. Split at the end of this block.
 726                 int opIdBeforeBlockEnd = allocator.getLastLirInstructionId(cur) - 2;
 727                 if (allocator.getLIR().getLIRforBlock(cur).size() > 2) {
 728                     minProbability = cur.probability();
 729                     optimalSplitPos = opIdBeforeBlockEnd;
 730                 } else {
 731                     /*
 732                      * Skip blocks with only LabelOp and BlockEndOp since they cause move ordering
 733                      * problems.
 734                      */
 735                     assert allocator.isBlockBegin(opIdBeforeBlockEnd);
 736                 }
 737             }
 738         }
 739         assert optimalSplitPos > allocator.maxOpId() || optimalSplitPos == maxSplitPos || allocator.isBlockEnd(optimalSplitPos + 2) : "algorithm must move split pos to block boundary";
 740         assert !allocator.isBlockBegin(optimalSplitPos);
 741         return optimalSplitPos;
 742     }
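The loop above is the spill placement heuristic: between the definition block and the block of the use, prefer the end of the least frequently executed block, but skip blocks that contain only a label and a block end op. A condensed sketch with a hypothetical Block type standing in for AbstractBlockBase and the allocator queries used above:

    final class SpillPosHeuristicSketch {
        interface Block {
            double probability();
            int lastOpId();          // id of the block end op
            int instructionCount();
        }

        static int cheapestSpillPos(Block[] blocks, int fromBlockNr, int toBlockNr, int fallbackPos) {
            int optimalPos = fallbackPos;
            double minProbability = blocks[toBlockNr].probability();
            for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
                Block cur = blocks[i];
                // Prefer less frequently executed blocks; skip label/end-only blocks.
                if (cur.probability() < minProbability && cur.instructionCount() > 2) {
                    minProbability = cur.probability();
                    optimalPos = cur.lastOpId() - 2; // insert before the block end op
                }
            }
            return optimalPos;
        }
    }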
 743 
 744     /**
 745      * This is called for every interval that is assigned to a stack slot.
 746      */
 747     private static void handleSpillSlot(TraceInterval interval) {
 748         assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval;
 749         // Do nothing. Stack slots are not processed in this implementation.
 750     }
 751 
 752     private void splitStackInterval(TraceInterval interval) {
 753         int minSplitPos = currentPosition + 1;
 754         int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());
 755 
 756         splitBeforeUsage(interval, minSplitPos, maxSplitPos);
 757     }
 758 
 759     private void splitWhenPartialRegisterAvailable(TraceInterval interval, int registerAvailableUntil) {
 760         int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);

