/*
 * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 package org.graalvm.compiler.lir.alloc.trace.lsra; 24 25 import static jdk.vm.ci.code.CodeUtil.isOdd; 26 import static jdk.vm.ci.code.ValueUtil.asRegister; 27 import static jdk.vm.ci.code.ValueUtil.isRegister; 28 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue; 29 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable; 30 31 import java.util.ArrayList; 32 import java.util.Arrays; 33 import java.util.BitSet; 34 35 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters; 36 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase; 37 import org.graalvm.compiler.core.common.util.Util; 38 import org.graalvm.compiler.debug.Debug; 39 import org.graalvm.compiler.debug.GraalError; 40 import org.graalvm.compiler.debug.Indent; 41 import org.graalvm.compiler.lir.LIRInstruction; 42 import org.graalvm.compiler.lir.LIRValueUtil; 43 import org.graalvm.compiler.lir.StandardOp.BlockEndOp; 44 import org.graalvm.compiler.lir.StandardOp.LabelOp; 45 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp; 46 import org.graalvm.compiler.lir.alloc.OutOfRegistersException; 47 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.RegisterPriority; 48 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.SpillState; 49 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.State; 50 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceLinearScanPhase.TraceLinearScan; 51 import org.graalvm.compiler.lir.ssa.SSAUtil; 52 53 import jdk.vm.ci.code.Register; 54 import jdk.vm.ci.meta.Value; 55 56 /** 57 */ 58 final class TraceLinearScanWalker { 59 60 /** 61 * Adds an interval to a list sorted by {@linkplain FixedInterval#currentFrom() current from} 62 * positions. 
63 * 64 * @param list the list 65 * @param interval the interval to add 66 * @return the new head of the list 67 */ 68 private static FixedInterval addToListSortedByCurrentFromPositions(FixedInterval list, FixedInterval interval) { 69 FixedInterval prev = null; 70 FixedInterval cur = list; 71 while (cur.currentFrom() < interval.currentFrom()) { 72 prev = cur; 73 cur = cur.next; 74 } 75 FixedInterval result = list; 76 if (prev == null) { 77 // add to head of list 78 result = interval; 79 } else { 80 // add before 'cur' 81 prev.next = interval; 82 } 83 interval.next = cur; 84 return result; 85 } 86 87 /** 88 * Adds an interval to a list sorted by {@linkplain TraceInterval#from() current from} 89 * positions. 90 * 91 * @param list the list 92 * @param interval the interval to add 93 * @return the new head of the list 94 */ 95 private static TraceInterval addToListSortedByFromPositions(TraceInterval list, TraceInterval interval) { 96 TraceInterval prev = null; 97 TraceInterval cur = list; 98 while (cur.from() < interval.from()) { 99 prev = cur; 100 cur = cur.next; 101 } 102 TraceInterval result = list; 103 if (prev == null) { 104 // add to head of list 105 result = interval; 106 } else { 107 // add before 'cur' 108 prev.next = interval; 109 } 110 interval.next = cur; 111 return result; 112 } 113 114 /** 115 * Adds an interval to a list sorted by {@linkplain TraceInterval#from() start} positions and 116 * {@linkplain TraceInterval#firstUsage(RegisterPriority) first usage} positions. 
117 * 118 * @param list the list 119 * @param interval the interval to add 120 * @return the new head of the list 121 */ 122 private static TraceInterval addToListSortedByStartAndUsePositions(TraceInterval list, TraceInterval interval) { 123 TraceInterval newHead = list; 124 TraceInterval prev = null; 125 TraceInterval cur = newHead; 126 while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { 127 prev = cur; 128 cur = cur.next; 129 } 130 if (prev == null) { 131 newHead = interval; 132 } else { 133 prev.next = interval; 134 } 135 interval.next = cur; 136 return newHead; 137 } 138 139 /** 140 * Removes an interval from a list. 141 * 142 * @param list the list 143 * @param interval the interval to remove 144 * @return the new head of the list 145 */ 146 private static TraceInterval removeAny(TraceInterval list, TraceInterval interval) { 147 TraceInterval newHead = list; 148 TraceInterval prev = null; 149 TraceInterval cur = newHead; 150 while (cur != interval) { 151 assert cur != null && cur != TraceInterval.EndMarker : "interval has not been found in list: " + interval; 152 prev = cur; 153 cur = cur.next; 154 } 155 if (prev == null) { 156 newHead = cur.next; 157 } else { 158 prev.next = cur.next; 159 } 160 return newHead; 161 } 162 163 private Register[] availableRegs; 164 165 private final int[] usePos; 166 private final int[] blockPos; 167 private final BitSet isInMemory; 168 169 private final ArrayList<TraceInterval>[] spillIntervals; 170 171 private TraceLocalMoveResolver moveResolver; // for ordering spill moves 172 173 private int minReg; 174 175 private int maxReg; 176 177 private final TraceLinearScan allocator; 178 179 /** 180 * Sorted list of intervals, not live before the current position. 181 */ 182 private TraceInterval unhandledAnyList; 183 184 /** 185 * Sorted list of intervals, live at the current position. 
186 */ 187 private TraceInterval activeAnyList; 188 189 private FixedInterval activeFixedList; 190 191 /** 192 * Sorted list of intervals in a life time hole at the current position. 193 */ 194 private FixedInterval inactiveFixedList; 195 196 /** 197 * The current position (intercept point through the intervals). 198 */ 199 private int currentPosition; 200 201 /** 202 * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used, 203 * they can grow quite long. The maximum length observed was 45 (all numbers taken from a 204 * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker 205 * value, and allocate a "real" list only on demand in {@link #setUsePos}. 206 */ 207 private static final ArrayList<TraceInterval> EMPTY_LIST = new ArrayList<>(0); 208 209 // accessors mapped to same functions in class LinearScan 210 private int blockCount() { 211 return allocator.blockCount(); 212 } 213 214 private AbstractBlockBase<?> blockAt(int idx) { 215 return allocator.blockAt(idx); 216 } 217 218 @SuppressWarnings("unused") 219 private AbstractBlockBase<?> blockOfOpWithId(int opId) { 220 return allocator.blockForId(opId); 221 } 222 223 TraceLinearScanWalker(TraceLinearScan allocator, FixedInterval unhandledFixedFirst, TraceInterval unhandledAnyFirst) { 224 this.allocator = allocator; 225 226 unhandledAnyList = unhandledAnyFirst; 227 activeAnyList = TraceInterval.EndMarker; 228 activeFixedList = FixedInterval.EndMarker; 229 // we don't need a separate unhandled list for fixed. 
230 inactiveFixedList = unhandledFixedFirst; 231 currentPosition = -1; 232 233 moveResolver = allocator.createMoveResolver(); 234 int numRegs = allocator.getRegisters().size(); 235 spillIntervals = Util.uncheckedCast(new ArrayList<?>[numRegs]); 236 for (int i = 0; i < numRegs; i++) { 237 spillIntervals[i] = EMPTY_LIST; 238 } 239 usePos = new int[numRegs]; 240 blockPos = new int[numRegs]; 241 isInMemory = new BitSet(numRegs); 242 } 243 244 private void initUseLists(boolean onlyProcessUsePos) { 245 for (Register register : availableRegs) { 246 int i = register.number; 247 usePos[i] = Integer.MAX_VALUE; 248 249 if (!onlyProcessUsePos) { 250 blockPos[i] = Integer.MAX_VALUE; 251 spillIntervals[i].clear(); 252 isInMemory.clear(i); 253 } 254 } 255 } 256 257 private int maxRegisterNumber() { 258 return maxReg; 259 } 260 261 private int minRegisterNumber() { 262 return minReg; 263 } 264 265 private boolean isRegisterInRange(int reg) { 266 return reg >= minRegisterNumber() && reg <= maxRegisterNumber(); 267 } 268 269 private void excludeFromUse(IntervalHint i) { 270 Value location = i.location(); 271 int i1 = asRegister(location).number; 272 if (isRegisterInRange(i1)) { 273 usePos[i1] = 0; 274 } 275 } 276 277 private void setUsePos(TraceInterval interval, int usePos, boolean onlyProcessUsePos) { 278 if (usePos != -1) { 279 assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; 280 int i = asRegister(interval.location()).number; 281 if (isRegisterInRange(i)) { 282 if (this.usePos[i] > usePos) { 283 this.usePos[i] = usePos; 284 } 285 if (!onlyProcessUsePos) { 286 ArrayList<TraceInterval> list = spillIntervals[i]; 287 if (list == EMPTY_LIST) { 288 list = new ArrayList<>(2); 289 spillIntervals[i] = list; 290 } 291 list.add(interval); 292 // set is in memory flag 293 if (interval.inMemoryAt(currentPosition)) { 294 isInMemory.set(i); 295 } 296 } 297 } 298 } 299 } 300 301 private void setUsePos(FixedInterval interval, int usePos, boolean onlyProcessUsePos) { 302 assert 
onlyProcessUsePos; 303 if (usePos != -1) { 304 assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; 305 int i = asRegister(interval.location()).number; 306 if (isRegisterInRange(i)) { 307 if (this.usePos[i] > usePos) { 308 this.usePos[i] = usePos; 309 } 310 } 311 } 312 } 313 314 private void setBlockPos(IntervalHint i, int blockPos) { 315 if (blockPos != -1) { 316 int reg = asRegister(i.location()).number; 317 if (isRegisterInRange(reg)) { 318 if (this.blockPos[reg] > blockPos) { 319 this.blockPos[reg] = blockPos; 320 } 321 if (usePos[reg] > blockPos) { 322 usePos[reg] = blockPos; 323 } 324 } 325 } 326 } 327 328 private void freeExcludeActiveFixed() { 329 FixedInterval interval = activeFixedList; 330 while (interval != FixedInterval.EndMarker) { 331 assert isRegister(interval.location()) : "active interval must have a register assigned"; 332 excludeFromUse(interval); 333 interval = interval.next; 334 } 335 } 336 337 private void freeExcludeActiveAny() { 338 TraceInterval interval = activeAnyList; 339 while (interval != TraceInterval.EndMarker) { 340 assert isRegister(interval.location()) : "active interval must have a register assigned"; 341 excludeFromUse(interval); 342 interval = interval.next; 343 } 344 } 345 346 private void freeCollectInactiveFixed(TraceInterval current) { 347 FixedInterval interval = inactiveFixedList; 348 while (interval != FixedInterval.EndMarker) { 349 if (current.to() <= interval.from()) { 350 assert interval.intersectsAt(current) == -1 : "must not intersect"; 351 setUsePos(interval, interval.from(), true); 352 } else { 353 setUsePos(interval, interval.currentIntersectsAt(current), true); 354 } 355 interval = interval.next; 356 } 357 } 358 359 private void spillExcludeActiveFixed() { 360 FixedInterval interval = activeFixedList; 361 while (interval != FixedInterval.EndMarker) { 362 excludeFromUse(interval); 363 interval = interval.next; 364 } 365 } 366 367 private void spillBlockInactiveFixed(TraceInterval current) { 368 
FixedInterval interval = inactiveFixedList; 369 while (interval != FixedInterval.EndMarker) { 370 if (current.to() > interval.currentFrom()) { 371 setBlockPos(interval, interval.currentIntersectsAt(current)); 372 } else { 373 assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; 374 } 375 376 interval = interval.next; 377 } 378 } 379 380 private void spillCollectActiveAny(RegisterPriority registerPriority) { 381 TraceInterval interval = activeAnyList; 382 while (interval != TraceInterval.EndMarker) { 383 setUsePos(interval, Math.min(interval.nextUsage(registerPriority, currentPosition), interval.to()), false); 384 interval = interval.next; 385 } 386 } 387 388 @SuppressWarnings("unused") 389 private int insertIdAtBasicBlockBoundary(int opId) { 390 assert allocator.isBlockBegin(opId) : "Not a block begin: " + opId; 391 assert allocator.instructionForId(opId) instanceof LabelOp; 392 assert allocator.instructionForId(opId - 2) instanceof BlockEndOp; 393 394 AbstractBlockBase<?> toBlock = allocator.blockForId(opId); 395 AbstractBlockBase<?> fromBlock = allocator.blockForId(opId - 2); 396 397 if (fromBlock.getSuccessorCount() == 1) { 398 // insert move in predecessor 399 return opId - 2; 400 } 401 assert toBlock.getPredecessorCount() == 1 : String.format("Critical Edge? %s->%s", fromBlock, toBlock); 402 // insert move in successor 403 return opId + 2; 404 } 405 406 private void insertMove(int operandId, TraceInterval srcIt, TraceInterval dstIt) { 407 // output all moves here. 
When source and target are equal, the move is 408 // optimized away later in assignRegNums 409 410 int opId = (operandId + 1) & ~1; 411 AbstractBlockBase<?> opBlock = allocator.blockForId(opId); 412 assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; 413 414 // calculate index of instruction inside instruction list of current block 415 // the minimal index (for a block with no spill moves) can be calculated because the 416 // numbering of instructions is known. 417 // When the block already contains spill moves, the index must be increased until the 418 // correct index is reached. 419 ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(opBlock); 420 int index = (opId - instructions.get(0).id()) >> 1; 421 assert instructions.get(index).id() <= opId : "error in calculation"; 422 423 while (instructions.get(index).id() != opId) { 424 index++; 425 assert 0 <= index && index < instructions.size() : "index out of bounds"; 426 } 427 assert 1 <= index && index < instructions.size() : "index out of bounds"; 428 assert instructions.get(index).id() == opId : "error in calculation"; 429 430 // insert new instruction before instruction at position index 431 moveResolver.moveInsertPosition(instructions, index); 432 moveResolver.addMapping(srcIt, dstIt); 433 } 434 435 private int findOptimalSplitPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) { 436 int fromBlockNr = minBlock.getLinearScanNumber(); 437 int toBlockNr = maxBlock.getLinearScanNumber(); 438 439 assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; 440 assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; 441 assert fromBlockNr < toBlockNr : "must cross block boundary"; 442 443 // Try to split at end of maxBlock. 
If this would be after 444 // maxSplitPos, then use the begin of maxBlock 445 int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2; 446 if (optimalSplitPos > maxSplitPos) { 447 optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock); 448 } 449 450 // minimal block probability 451 double minProbability = maxBlock.probability(); 452 for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { 453 AbstractBlockBase<?> cur = blockAt(i); 454 455 if (cur.probability() < minProbability) { 456 // Block with lower probability found. Split at the end of this block. 457 minProbability = cur.probability(); 458 optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2; 459 } 460 } 461 assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; 462 463 return optimalSplitPos; 464 } 465 466 @SuppressWarnings({"unused"}) 467 private int findOptimalSplitPos(TraceInterval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { 468 int optimalSplitPos = findOptimalSplitPos0(minSplitPos, maxSplitPos); 469 if (Debug.isLogEnabled()) { 470 Debug.log("optimal split position: %d", optimalSplitPos); 471 } 472 return optimalSplitPos; 473 } 474 475 private int findOptimalSplitPos0(int minSplitPos, int maxSplitPos) { 476 if (minSplitPos == maxSplitPos) { 477 // trivial case, no optimization of split position possible 478 if (Debug.isLogEnabled()) { 479 Debug.log("min-pos and max-pos are equal, no optimization possible"); 480 } 481 return minSplitPos; 482 483 } 484 assert minSplitPos < maxSplitPos : "must be true then"; 485 assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; 486 487 // reason for using minSplitPos - 1: when the minimal split pos is exactly at the 488 // beginning of a block, then minSplitPos is also a possible split position. 
489 // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == 490 // minSplitPos 491 AbstractBlockBase<?> minBlock = allocator.blockForId(minSplitPos - 1); 492 493 // reason for using maxSplitPos - 1: otherwise there would be an assert on failure 494 // when an interval ends at the end of the last block of the method 495 // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no 496 // block at this opId) 497 AbstractBlockBase<?> maxBlock = allocator.blockForId(maxSplitPos - 1); 498 499 assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order"; 500 if (minBlock == maxBlock) { 501 // split position cannot be moved to block boundary : so split as late as possible 502 if (Debug.isLogEnabled()) { 503 Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block"); 504 } 505 return maxSplitPos; 506 507 } 508 // seach optimal block boundary between minSplitPos and maxSplitPos 509 if (Debug.isLogEnabled()) { 510 Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId()); 511 } 512 513 return findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); 514 } 515 516 // split an interval at the optimal position between minSplitPos and 517 // maxSplitPos in two parts: 518 // 1) the left part has already a location assigned 519 // 2) the right part is sorted into to the unhandled-list 520 @SuppressWarnings("try") 521 private void splitBeforeUsage(TraceInterval interval, int minSplitPos, int maxSplitPos) { 522 523 try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { 524 525 assert interval.from() < minSplitPos : "cannot split at start of interval"; 526 assert currentPosition < minSplitPos : "cannot split before current position"; 527 assert minSplitPos <= maxSplitPos : "invalid order"; 528 assert maxSplitPos <= interval.to() : "cannot split 
after end of interval"; 529 530 final int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); 531 532 if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { 533 // the split position would be just before the end of the interval 534 // . no split at all necessary 535 if (Debug.isLogEnabled()) { 536 Debug.log("no split necessary because optimal split position is at end of interval"); 537 } 538 return; 539 } 540 // must calculate this before the actual split is performed and before split position is 541 // moved to odd opId 542 final int optimalSplitPosFinal; 543 boolean blockBegin = allocator.isBlockBegin(optimalSplitPos); 544 assert blockBegin || !allocator.isBlockBegin(optimalSplitPos - 1); 545 boolean moveNecessary = !blockBegin; 546 if (blockBegin) { 547 optimalSplitPosFinal = optimalSplitPos; 548 } else { 549 // move position before actual instruction (odd opId) 550 optimalSplitPosFinal = (optimalSplitPos - 1) | 1; 551 } 552 553 // TODO( je) better define what min split pos max split pos mean. 554 assert minSplitPos <= optimalSplitPosFinal && optimalSplitPosFinal <= maxSplitPos || minSplitPos == maxSplitPos && optimalSplitPosFinal == minSplitPos - 1 : "out of range"; 555 assert optimalSplitPosFinal <= interval.to() : "cannot split after end of interval"; 556 assert optimalSplitPosFinal > interval.from() : "cannot split at start of interval"; 557 558 if (Debug.isLogEnabled()) { 559 Debug.log("splitting at position %d", optimalSplitPosFinal); 560 } 561 assert optimalSplitPosFinal > currentPosition : "Can not split interval " + interval + " at current position: " + currentPosition; 562 563 assert blockBegin || ((optimalSplitPosFinal & 1) == 1) : "split pos must be odd when not on block boundary"; 564 assert !blockBegin || ((optimalSplitPosFinal & 1) == 0) : "split pos must be even on block boundary"; 565 566 // TODO (je) duplicate code. 
try to fold 567 if (optimalSplitPosFinal == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { 568 // the split position would be just before the end of the interval 569 // . no split at all necessary 570 if (Debug.isLogEnabled()) { 571 Debug.log("no split necessary because optimal split position is at end of interval"); 572 } 573 return; 574 } 575 TraceInterval splitPart = interval.split(optimalSplitPosFinal, allocator); 576 577 splitPart.setInsertMoveWhenActivated(moveNecessary); 578 579 assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; 580 unhandledAnyList = TraceLinearScanWalker.addToListSortedByStartAndUsePositions(unhandledAnyList, splitPart); 581 582 if (Debug.isLogEnabled()) { 583 Debug.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString()); 584 Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString()); 585 } 586 } 587 } 588 589 // split an interval at the optimal position between minSplitPos and 590 // maxSplitPos in two parts: 591 // 1) the left part has already a location assigned 592 // 2) the right part is always on the stack and therefore ignored in further processing 593 @SuppressWarnings("try") 594 private void splitForSpilling(TraceInterval interval) { 595 // calculate allowed range of splitting position 596 int maxSplitPos = currentPosition; 597 int previousUsage = interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos); 598 if (previousUsage == currentPosition) { 599 /* 600 * If there is a usage with ShouldHaveRegister priority at the current position fall 601 * back to MustHaveRegister priority. This only happens if register priority was 602 * downgraded to MustHaveRegister in #allocLockedRegister. 
603 */ 604 previousUsage = interval.previousUsage(RegisterPriority.MustHaveRegister, maxSplitPos); 605 } 606 int minSplitPos = Math.max(previousUsage + 1, interval.from()); 607 608 try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { 609 610 assert interval.from() <= minSplitPos : "cannot split before start of interval"; 611 assert minSplitPos <= maxSplitPos : "invalid order"; 612 assert maxSplitPos < interval.to() : "cannot split at end end of interval"; 613 assert currentPosition < interval.to() : "interval must not end before current position"; 614 615 if (minSplitPos == interval.from()) { 616 // the whole interval is never used, so spill it entirely to memory 617 618 try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.numUsePos())) { 619 620 assert interval.firstUsage(RegisterPriority.MustHaveRegister) > currentPosition : String.format("interval %s must not have use position before currentPosition %d", interval, 621 currentPosition); 622 623 allocator.assignSpillSlot(interval); 624 handleSpillSlot(interval); 625 changeSpillState(interval, minSplitPos); 626 627 // Also kick parent intervals out of register to memory when they have no use 628 // position. This avoids short interval in register surrounded by intervals in 629 // memory . 
avoid useless moves from memory to register and back 630 TraceInterval parent = interval; 631 while (parent != null && parent.isSplitChild()) { 632 parent = parent.getSplitChildBeforeOpId(parent.from()); 633 634 if (isRegister(parent.location())) { 635 if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { 636 // parent is never used, so kick it out of its assigned register 637 if (Debug.isLogEnabled()) { 638 Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); 639 } 640 allocator.assignSpillSlot(parent); 641 handleSpillSlot(parent); 642 } else { 643 // do not go further back because the register is actually used by 644 // the interval 645 parent = null; 646 } 647 } 648 } 649 } 650 651 } else { 652 // search optimal split pos, split interval and spill only the right hand part 653 int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); 654 655 assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; 656 assert optimalSplitPos < interval.to() : "cannot split at end of interval"; 657 assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; 658 659 if (!allocator.isBlockBegin(optimalSplitPos)) { 660 // move position before actual instruction (odd opId) 661 optimalSplitPos = (optimalSplitPos - 1) | 1; 662 } 663 664 try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) { 665 assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; 666 assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; 667 668 TraceInterval spilledPart = interval.split(optimalSplitPos, allocator); 669 allocator.assignSpillSlot(spilledPart); 670 handleSpillSlot(spilledPart); 671 changeSpillState(spilledPart, optimalSplitPos); 672 673 if 
(!allocator.isBlockBegin(optimalSplitPos)) { 674 if (Debug.isLogEnabled()) { 675 Debug.log("inserting move from interval %s to %s", interval, spilledPart); 676 } 677 insertMove(optimalSplitPos, interval, spilledPart); 678 } else { 679 if (Debug.isLogEnabled()) { 680 Debug.log("no need to insert move. done by data-flow resolution"); 681 } 682 } 683 684 // the currentSplitChild is needed later when moves are inserted for reloading 685 assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; 686 spilledPart.makeCurrentSplitChild(); 687 688 if (Debug.isLogEnabled()) { 689 Debug.log("left interval: %s", interval.logString()); 690 Debug.log("spilled interval : %s", spilledPart.logString()); 691 } 692 } 693 } 694 } 695 } 696 697 /** 698 * Change spill state of an interval. 699 * 700 * Note: called during register allocation. 701 * 702 * @param spillPos position of the spill 703 */ 704 private void changeSpillState(TraceInterval interval, int spillPos) { 705 if (TraceLinearScanPhase.Options.LIROptTraceRAEliminateSpillMoves.getValue(allocator.getOptions())) { 706 switch (interval.spillState()) { 707 case NoSpillStore: 708 final int minSpillPos = calculateMinSpillPos(interval.spillDefinitionPos(), spillPos); 709 final int maxSpillPos = calculateMaxSpillPos(minSpillPos, spillPos); 710 711 final int optimalSpillPos = findOptimalSpillPos(minSpillPos, maxSpillPos); 712 713 /* Cannot spill at block begin since it interferes with move resolution. 
*/ 714 assert isNotBlockBeginOrMerge(optimalSpillPos) : "Spill pos at block begin: " + optimalSpillPos; 715 assert !allocator.isBlockEnd(optimalSpillPos) : "Spill pos at block end: " + optimalSpillPos; 716 assert (optimalSpillPos & 1) == 0 : "Spill pos must be even " + optimalSpillPos; 717 718 interval.setSpillDefinitionPos(optimalSpillPos); 719 interval.setSpillState(SpillState.SpillStore); 720 break; 721 case SpillStore: 722 case StartInMemory: 723 case NoOptimization: 724 case NoDefinitionFound: 725 // nothing to do 726 break; 727 728 default: 729 throw GraalError.shouldNotReachHere("other states not allowed at this time"); 730 } 731 } else { 732 interval.setSpillState(SpillState.NoOptimization); 733 } 734 } 735 736 private int calculateMinSpillPos(int spillDefinitionPos, int spillPos) { 737 int spillDefinitionPosEven = spillDefinitionPos & ~1; 738 if (spillDefinitionPosEven == 0 || !allocator.isBlockBegin(spillDefinitionPosEven) || spillDefinitionPos == spillPos) { 739 assert !allocator.isBlockEnd(spillDefinitionPosEven) : "Defintion at block end? " + spillDefinitionPos; 740 return spillDefinitionPos; 741 } 742 assert allocator.isBlockBegin(spillDefinitionPosEven); 743 if (SSAUtil.isMerge(allocator.blockForId(spillDefinitionPos))) { 744 /* Spill at merge are OK since there will be no resolution moves. */ 745 return spillDefinitionPos; 746 } 747 int minSpillPos = spillDefinitionPosEven + 2; 748 while (allocator.isBlockEnd(minSpillPos)) { 749 // +2 is block begin, +4 is the instruction afterwards 750 minSpillPos += 4; 751 } 752 assert minSpillPos <= spillPos : String.format("No minSpillPos found. 
defPos: %d, spillPos: %d, minSpillPos, %d", spillDefinitionPos, spillPos, minSpillPos);
        return minSpillPos;
    }

    /**
     * Computes the latest position at which a spill move may be inserted, moving the requested
     * position away from block boundaries (block begins and block ends) because moves inserted
     * there would interfere with control-flow resolution.
     *
     * @param minSpillPos earliest legal spill position
     * @param spillPos requested spill position
     * @return a position {@code <= spillPos} that is neither a block begin nor a block end
     */
    private int calculateMaxSpillPos(final int minSpillPos, int spillPos) {
        int spillPosEven = spillPos & ~1;
        if (spillPosEven == 0) {
            return spillPos;
        }
        if ((minSpillPos & ~1) == spillPosEven) {
            // min and max fall on the same (even) instruction position: no room to move.
            assert isNotBlockBeginOrMerge(spillPos);
            return spillPos;
        }
        int maxSpillPos;
        /* Move away from block end. */
        if (allocator.isBlockEnd(spillPosEven)) {
            /* Block end. Use instruction before. */
            maxSpillPos = spillPosEven - 2;
        } else if (allocator.isBlockBegin(spillPosEven)) {
            /* Block begin. Use instruction before previous block end. */
            maxSpillPos = spillPosEven - 4;
        } else {
            return spillPos;
        }
        assert !allocator.isBlockEnd(maxSpillPos) : "Can no longer be a block end! " + maxSpillPos;

        /* Skip block begins. */
        while (allocator.isBlockBegin(maxSpillPos) && maxSpillPos > minSpillPos) {
            // -2 is block end, -4 is the instruction before
            maxSpillPos -= 4;
        }
        assert minSpillPos <= maxSpillPos;
        return maxSpillPos;
    }

    /**
     * Returns {@code true} if {@code spillPos} is acceptable as a spill position: position 0, any
     * position that is not a block begin, or the begin of a merge block (per
     * {@link SSAUtil#isMerge}).
     */
    private boolean isNotBlockBeginOrMerge(int spillPos) {
        int spillPosEven = spillPos & ~1;
        return spillPosEven == 0 || !allocator.isBlockBegin(spillPosEven) || SSAUtil.isMerge(allocator.blockForId(spillPosEven));
    }

    /**
     * Finds the optimal spill position in {@code [minSpillPos, maxSpillPos]} and rounds it down to
     * an even (instruction) position.
     *
     * @param minSpillPos minimal spill position
     * @param maxSpillPos maximal spill position
     */
    private int findOptimalSpillPos(int minSpillPos, int maxSpillPos) {
        // & (~1) rounds down to the even instruction position.
        int optimalSpillPos = findOptimalSpillPos0(minSpillPos, maxSpillPos) & (~1);
        if (Debug.isLogEnabled()) {
            Debug.log("optimal spill position: %d", optimalSpillPos);
        }
        return optimalSpillPos;
    }

    /**
     * Helper for {@link #findOptimalSpillPos(int, int)}: handles the trivial cases (equal
     * positions, both positions in the same block) and otherwise delegates to the block-boundary
     * heuristic.
     */
    private int findOptimalSpillPos0(int minSpillPos, int maxSpillPos) {
        if (minSpillPos == maxSpillPos) {
            // trivial case, no optimization of split position possible
            if (Debug.isLogEnabled()) {
                Debug.log("min-pos and max-pos are equal, no optimization possible");
            }
            return minSpillPos;

        }
        assert minSpillPos < maxSpillPos : "must be true then";
        assert minSpillPos >= 0 : "cannot access minSplitPos - 1 otherwise";

        AbstractBlockBase<?> minBlock = allocator.blockForId(minSpillPos);
        AbstractBlockBase<?> maxBlock = allocator.blockForId(maxSpillPos);

        assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order";
        if (minBlock == maxBlock) {
            // split position cannot be moved to block boundary : so split as late as possible
            if (Debug.isLogEnabled()) {
                Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block");
            }
            return maxSpillPos;

        }
        // search optimal block boundary between minSplitPos and maxSplitPos
        if (Debug.isLogEnabled()) {
            Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
        }

        // currently using the same heuristic as for splitting
        return findOptimalSpillPos(minBlock, maxBlock, maxSpillPos);
    }

    /**
     * Chooses a spill position at the end of the lowest-probability block between
     * {@code minBlock} and {@code maxBlock}, so that the spill move executes as rarely as
     * possible. Blocks containing only a label and a block-end op are skipped.
     */
    private int findOptimalSpillPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
        int fromBlockNr = minBlock.getLinearScanNumber();
        int toBlockNr = maxBlock.getLinearScanNumber();

        assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
        assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
        assert fromBlockNr < toBlockNr : "must cross block boundary";

        /*
         * Try to split at end of maxBlock. We use last instruction -2 because we want to insert the
         * move before the block end op. If this would be after maxSplitPos, then use the
         * maxSplitPos.
         */
        int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) - 2;
        if (optimalSplitPos > maxSplitPos) {
            optimalSplitPos = maxSplitPos;
        }

        // minimal block probability
        double minProbability = maxBlock.probability();
        for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
            AbstractBlockBase<?> cur = blockAt(i);

            if (cur.probability() < minProbability) {
                // Block with lower probability found. Split at the end of this block.
                int opIdBeforeBlockEnd = allocator.getLastLirInstructionId(cur) - 2;
                if (allocator.getLIR().getLIRforBlock(cur).size() > 2) {
                    minProbability = cur.probability();
                    optimalSplitPos = opIdBeforeBlockEnd;
                } else {
                    /*
                     * Skip blocks with only LabelOp and BlockEndOp since they cause move ordering
                     * problems.
                     */
                    assert allocator.isBlockBegin(opIdBeforeBlockEnd);
                }
            }
        }
        assert optimalSplitPos > allocator.maxOpId() || optimalSplitPos == maxSplitPos || allocator.isBlockEnd(optimalSplitPos + 2) : "algorithm must move split pos to block boundary";
        assert !allocator.isBlockBegin(optimalSplitPos);
        return optimalSplitPos;
    }

    /**
     * This is called for every interval that is assigned to a stack slot.
     */
    private static void handleSpillSlot(TraceInterval interval) {
        assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval;
        // Do nothing. Stack slots are not processed in this implementation.
    }

    /**
     * Splits an interval that already lives on the stack before its first use position that
     * requires a register ({@code ShouldHaveRegister} or stronger).
     */
    private void splitStackInterval(TraceInterval interval) {
        int minSplitPos = currentPosition + 1;
        int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());

        splitBeforeUsage(interval, minSplitPos, maxSplitPos);
    }

    /**
     * Splits {@code interval} so that its head can use a register that is only free until
     * {@code registerAvailableUntil}. The split position is moved back to the previous
     * {@code ShouldHaveRegister} usage where possible.
     */
    private void splitWhenPartialRegisterAvailable(TraceInterval interval, int registerAvailableUntil) {
        int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);
        splitBeforeUsage(interval, minSplitPos, registerAvailableUntil);
    }

    /**
     * Splits {@code interval} before its next {@code MustHaveRegister} usage (if any) and spills
     * the remaining head part to the stack.
     */
    private void splitAndSpillInterval(TraceInterval interval) {
        int currentPos = currentPosition;
        /*
         * Search the position where the interval must have a register and split at the optimal
         * position before. The newly created part is added to the unhandled list and will get a
         * register when it is activated.
         */
        int minSplitPos = currentPos + 1;
        int maxSplitPos = interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos);

        if (maxSplitPos <= interval.to()) {
            splitBeforeUsage(interval, minSplitPos, maxSplitPos);
        } else {
            Debug.log("No more usage, no need to split: %s", interval);
        }

        assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register";
        splitForSpilling(interval);
    }

    /**
     * Tries to assign a currently free register to {@code interval}.
     *
     * Prefers a register that is free for the whole interval; otherwise takes the register that
     * stays free the longest and splits the interval at the point where the register becomes
     * unavailable. A location hint (e.g. from a move) is honored when possible.
     *
     * @return {@code true} if a register was assigned (possibly after splitting), {@code false}
     *         if no register is free long enough
     */
    @SuppressWarnings("try")
    private boolean allocFreeRegister(TraceInterval interval) {
        try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) {

            initUseLists(true);
            freeExcludeActiveFixed();
            freeCollectInactiveFixed(interval);
            freeExcludeActiveAny();
            // freeCollectUnhandled(fixedKind, cur);

            // usePos contains the start of the next interval that has this register assigned
            // (either as a fixed register or a normal allocated register in the past)
            // only intervals overlapping with cur are processed, non-overlapping intervals can be
            // ignored safely
            if (Debug.isLogEnabled()) {
                // Enable this logging to see all register states
                try (Indent indent2 = Debug.logAndIndent("state of registers:")) {
                    for (Register register : availableRegs) {
                        int i = register.number;
                        Debug.log("reg %d (%s): usePos: %d", register.number, register, usePos[i]);
                    }
                }
            }

            Register hint = null;
            IntervalHint locationHint = interval.locationHint(true);
            if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) {
                hint = asRegister(locationHint.location());
                if (Debug.isLogEnabled()) {
                    Debug.log("hint register %3d (%4s) from interval %s", hint.number, hint, locationHint);
                }
            }
            assert interval.location() == null : "register already assigned to interval";

            // the register must be free at least until this position
            int regNeededUntil = interval.from() + 1;
            int intervalTo = interval.to();

            boolean needSplit = false;
            int splitPos = -1;

            Register reg = null;
            // best register that is free for the whole interval
            Register minFullReg = null;
            // best register that is free at least until regNeededUntil
            Register maxPartialReg = null;

            for (Register availableReg : availableRegs) {
                int number = availableReg.number;
                if (usePos[number] >= intervalTo) {
                    // this register is free for the full interval
                    if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) {
                        minFullReg = availableReg;
                    }
                } else if (usePos[number] > regNeededUntil) {
                    // this register is at least free until regNeededUntil
                    if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) {
                        maxPartialReg = availableReg;
                    }
                }
            }

            if (minFullReg != null) {
                reg = minFullReg;
            } else if (maxPartialReg != null) {
                needSplit = true;
                reg = maxPartialReg;
            } else {
                // no register free long enough
                return false;
            }

            splitPos = usePos[reg.number];
            interval.assignLocation(reg.asValue(allocator.getKind(interval)));
            if (Debug.isLogEnabled()) {
                Debug.log("selected register %d (%s)", reg.number, reg);
            }

            assert splitPos > 0 : "invalid splitPos";
            if (needSplit) {
                // register not available for full interval, so split it
                splitWhenPartialRegisterAvailable(interval, splitPos);
            }
            // only return true if interval is completely assigned
            return true;
        }
    }

    /**
     * Splits and spills every interval currently recorded as occupying {@code reg}, so that the
     * current interval can be placed in that register.
     */
    private void splitAndSpillIntersectingIntervals(Register reg) {
        assert reg != null : "no register assigned";

        for (int i = 0; i < spillIntervals[reg.number].size(); i++) {
            TraceInterval interval = spillIntervals[reg.number].get(i);
            removeFromList(interval);
            splitAndSpillInterval(interval);
        }
    }

    // Split an Interval and spill it to memory so that cur can be placed in a register
    @SuppressWarnings("try")
    private void allocLockedRegister(TraceInterval interval) {
        try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) {

            // the register must be free at least until this position
            int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);
            int firstShouldHaveUsage = interval.firstUsage(RegisterPriority.ShouldHaveRegister);
            int regNeededUntil = Math.min(firstUsage, interval.from() + 1);
            int intervalTo = interval.to();
            assert regNeededUntil >= 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use";

            Register reg;
            Register ignore;
            /*
             * In the common case we don't spill registers that have _any_ use position that is
             * closer than the next use of the current interval, but if we can't spill the current
             * interval we weaken this strategy and also allow spilling of intervals that have a
             * non-mandatory requirement (no MustHaveRegister use position).
             */
            for (RegisterPriority registerPriority = RegisterPriority.LiveAtLoopEnd; true; registerPriority = RegisterPriority.MustHaveRegister) {
                // collect current usage of registers
                initUseLists(false);
                spillExcludeActiveFixed();
                // spillBlockUnhandledFixed(cur);
                spillBlockInactiveFixed(interval);
                spillCollectActiveAny(registerPriority);
                if (Debug.isLogEnabled()) {
                    printRegisterState();
                }

                reg = null;
                // never spill the register the interval is already assigned to
                ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null;

                for (Register availableReg : availableRegs) {
                    int number = availableReg.number;
                    if (availableReg.equals(ignore)) {
                        // this register must be ignored
                    } else if (usePos[number] > regNeededUntil) {
                        /*
                         * If the use position is the same, prefer registers (active intervals)
                         * where the value is already on the stack.
                         */
                        if (reg == null || (usePos[number] > usePos[reg.number]) || (usePos[number] == usePos[reg.number] && (!isInMemory.get(reg.number) && isInMemory.get(number)))) {
                            reg = availableReg;
                        }
                    }
                }

                if (Debug.isLogEnabled()) {
                    Debug.log("Register Selected: %s", reg);
                }

                int regUsePos = (reg == null ? 0 : usePos[reg.number]);
                if (regUsePos <= firstShouldHaveUsage) {
                    /* Check if there is another interval that is already in memory. */
                    if (reg == null || interval.inMemoryAt(currentPosition) || !isInMemory.get(reg.number)) {
                        if (Debug.isLogEnabled()) {
                            Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos);
                        }

                        if (firstUsage <= interval.from() + 1) {
                            if (registerPriority.equals(RegisterPriority.LiveAtLoopEnd)) {
                                /*
                                 * Tool of last resort: we cannot spill the current interval so we
                                 * try to spill an active interval that has a usage but does not
                                 * require a register.
                                 */
                                Debug.log("retry with register priority must have register");
                                continue;
                            }
                            String description = "cannot spill interval (" + interval + ") that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage +
                                            ", interval.from()=" + interval.from() + "; already used candidates: " + Arrays.toString(availableRegs);
                            /*
                             * assign a reasonable register and do a bailout in product mode to
                             * avoid errors
                             */
                            allocator.assignSpillSlot(interval);
                            if (Debug.isDumpEnabled(Debug.INFO_LEVEL)) {
                                dumpLIRAndIntervals(description);
                            }
                            throw new OutOfRegistersException("LinearScan: no register found", description);
                        }

                        splitAndSpillInterval(interval);
                        return;
                    }
                }
                // common case: break out of the loop
                break;
            }

            boolean needSplit = blockPos[reg.number] <= intervalTo;

            int splitPos = blockPos[reg.number];

            if (Debug.isLogEnabled()) {
                Debug.log("decided to use register %d", reg.number);
            }
            assert splitPos > 0 : "invalid splitPos";
            assert needSplit || splitPos > interval.from() : "splitting interval at from";

            interval.assignLocation(reg.asValue(allocator.getKind(interval)));
            if (needSplit) {
                // register not available for full interval : so split it
                splitWhenPartialRegisterAvailable(interval, splitPos);
            }

            // perform splitting and spilling for all affected intervals
            splitAndSpillIntersectingIntervals(reg);
            return;
        }
    }

    /** Dumps the LIR and the interval state for debugging, e.g. on an out-of-registers bailout. */
    private void dumpLIRAndIntervals(String description) {
        Debug.dump(Debug.INFO_LEVEL, allocator.getLIR(), description);
        allocator.printIntervals(description);
    }

    /** Logs usePos/blockPos/in-memory state and spill candidates for every available register. */
    @SuppressWarnings("try")
    private void printRegisterState() {
        try (Indent indent2 = Debug.logAndIndent("state of registers:")) {
            for (Register reg : availableRegs) {
                int i = reg.number;
                try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, inMemory: %b, intervals: ", i, usePos[i], blockPos[i], isInMemory.get(i))) {
                    for (int j = 0; j < spillIntervals[i].size(); j++) {
                        Debug.log("%s", spillIntervals[i].get(j));
                    }
                }
            }
        }
    }

    /**
     * Fast path: returns {@code true} if {@code interval} can never get a register because the
     * next instruction is a call that kills all registers.
     */
    private boolean noAllocationPossible(TraceInterval interval) {
        if (allocator.callKillsRegisters()) {
            // fast calculation of intervals that can never get a register because the
            // next instruction is a call that blocks all registers
            // Note: this only works if a call kills all registers

            // check if this interval is the result of a split operation
            // (an interval got a register until this position)
            int pos = interval.from();
            if (isOdd(pos)) {
                // the current instruction is a call that blocks all registers
                if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) {
                    if (Debug.isLogEnabled()) {
                        Debug.log("free register cannot be available because all registers blocked by following call");
                    }

                    // safety check that there is really no register available
                    assert !allocFreeRegister(interval) : "found a register for this interval";
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Initializes {@code availableRegs}, {@code minReg} and {@code maxReg} from the allocatable
     * registers configured for the platform kind of {@code interval}.
     */
    private void initVarsForAlloc(TraceInterval interval) {
        AllocatableRegisters allocatableRegisters = allocator.getRegisterAllocationConfig().getAllocatableRegisters(allocator.getKind(interval).getPlatformKind());
        availableRegs = allocatableRegisters.allocatableRegisters;
        minReg = allocatableRegisters.minRegisterNumber;
        maxReg = allocatableRegisters.maxRegisterNumber;
    }

    /**
     * Returns {@code true} if {@code op} is a variable-to-variable move from the operand of
     * {@code from} to the operand of {@code to}.
     */
    private static boolean isMove(LIRInstruction op, TraceInterval from, TraceInterval to) {
        if (ValueMoveOp.isValueMoveOp(op)) {
            ValueMoveOp move = ValueMoveOp.asValueMoveOp(op);
            if (isVariable(move.getInput()) && isVariable(move.getResult())) {
                return move.getInput() != null && LIRValueUtil.asVariable(move.getInput()).index == from.operandNumber && move.getResult() != null &&
                                LIRValueUtil.asVariable(move.getResult()).index == to.operandNumber;
            }
        }
        return false;
    }

    // optimization (especially for phi functions of nested loops):
    // assign same spill slot to non-intersecting intervals
    private void combineSpilledIntervals(TraceInterval interval) {
        if (interval.isSplitChild()) {
            // optimization is only suitable for split parents
            return;
        }

        IntervalHint locationHint = interval.locationHint(false);
        if (locationHint == null || !(locationHint instanceof TraceInterval)) {
            return;
        }
        TraceInterval registerHint = (TraceInterval) locationHint;
        assert registerHint.isSplitParent() : "register hint must be split parent";

        if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) {
            // combining the stack slots for intervals where spill move optimization is applied
            // is not beneficial and would cause problems
            return;
        }

        int beginPos = interval.from();
        int endPos = interval.to();
        if (endPos > allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) {
            // safety check that lirOpWithId is allowed
            return;
        }

        if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) {
            // cur and registerHint are not connected with two moves
            return;
        }

        TraceInterval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE);
        TraceInterval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF);
        if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) {
            // registerHint must be split : otherwise the re-writing of use positions does not work
            return;
        }

        assert beginHint.location() != null : "must have register assigned";
        assert endHint.location() == null : "must not have register assigned";
        assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move";
        assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move";

        if (isRegister(beginHint.location())) {
            // registerHint is not spilled at beginPos : so it would not be beneficial to
            // immediately spill cur
            return;
        }
        assert registerHint.spillSlot() != null : "must be set when part of interval was spilled";

        // modify intervals such that cur gets the same stack slot as registerHint
        // delete use positions to prevent the intervals to get a register at beginning
        interval.setSpillSlot(registerHint.spillSlot());
        interval.removeFirstUsePos();
        endHint.removeFirstUsePos();
    }

    // allocate a physical register or memory location to an interval
    @SuppressWarnings("try")
    private boolean activateCurrent(TraceInterval interval) {
        if (Debug.isLogEnabled()) {
            logCurrentStatus();
        }
        boolean result = true;

        try (Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %d", interval, interval.splitParent().operandNumber)) {

            if (interval.location() != null && isStackSlotValue(interval.location())) {
                // activating an interval that has a stack slot assigned -> split it at first use
                // position
                // used for method parameters
                if (Debug.isLogEnabled()) {
                    Debug.log("interval has spill slot assigned (method parameter) . split it before first use");
                }
                splitStackInterval(interval);
                result = false;

            } else {
                if (interval.location() == null) {
                    // interval has no assigned register -> normal allocation
                    // (this is the normal case for most intervals)
                    if (Debug.isLogEnabled()) {
                        Debug.log("normal allocation of register");
                    }

                    // assign same spill slot to non-intersecting intervals
                    combineSpilledIntervals(interval);

                    initVarsForAlloc(interval);
                    if (noAllocationPossible(interval) || !allocFreeRegister(interval)) {
                        // no empty register available.
                        // split and spill another interval so that this interval gets a register
                        allocLockedRegister(interval);
                    }

                    // spilled intervals need not be moved to active-list
                    if (!isRegister(interval.location())) {
                        result = false;
                    }
                }
            }

            // load spilled values that become active from stack slot to register
            if (interval.insertMoveWhenActivated()) {
                assert interval.isSplitChild();
                assert interval.currentSplitChild() != null;
                assert interval.currentSplitChild().operandNumber != interval.operandNumber : "cannot insert move between same interval";
                if (Debug.isLogEnabled()) {
                    Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber);
                }

                insertMove(interval.from(), interval.currentSplitChild(), interval);
            }
            interval.makeCurrentSplitChild();

        }

        return result; // true = interval is moved to active list
    }

    void finishAllocation() {
        // must be called when all intervals are allocated
        moveResolver.resolveAndAppendMoves();
    }

    /** Logs the active (fixed and any) and inactive (fixed) interval lists. */
    @SuppressWarnings("try")
    private void logCurrentStatus() {
        try (Indent i = Debug.logAndIndent("active:")) {
            logList(activeFixedList);
            logList(activeAnyList);
        }
        try (Indent i = Debug.logAndIndent("inactive(fixed):")) {
            logList(inactiveFixedList);
        }
    }

    /** Walks over all intervals, i.e. up to position {@link Integer#MAX_VALUE}. */
    void walk() {
        walkTo(Integer.MAX_VALUE);
    }

    /** Removes {@code interval} from the active-any list. */
    private void removeFromList(TraceInterval interval) {
        activeAnyList = TraceLinearScanWalker.removeAny(activeAnyList, interval);
    }

    /**
     * Walks up to {@code from} and updates the state of {@link FixedInterval fixed intervals}.
     *
     * Fixed intervals can switch back and forth between the states {@link State#Active} and
     * {@link State#Inactive} (and eventually to {@link State#Handled} but handled intervals are not
     * managed).
     */
    @SuppressWarnings("try")
    private void walkToFixed(State state, int from) {
        assert state == State.Active || state == State.Inactive : "wrong state";
        FixedInterval prevprev = null;
        FixedInterval prev = (state == State.Active) ? activeFixedList : inactiveFixedList;
        FixedInterval next = prev;
        if (Debug.isLogEnabled()) {
            try (Indent i = Debug.logAndIndent("walkToFixed(%s, %d):", state, from)) {
                logList(next);
            }
        }
        while (next.currentFrom() <= from) {
            FixedInterval cur = next;
            next = cur.next;

            boolean rangeHasChanged = false;
            // advance the current range past positions already walked
            while (cur.currentTo() <= from) {
                cur.nextRange();
                rangeHasChanged = true;
            }

            // also handle move from inactive list to active list
            rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from);

            if (rangeHasChanged) {
                // remove cur from list
                if (prevprev == null) {
                    if (state == State.Active) {
                        activeFixedList = next;
                    } else {
                        inactiveFixedList = next;
                    }
                } else {
                    prevprev.next = next;
                }
                prev = next;
                TraceInterval.State newState;
                if (cur.currentAtEnd()) {
                    // move to handled state (not maintained as a list)
                    newState = State.Handled;
                } else {
                    if (cur.currentFrom() <= from) {
                        // sort into active list
                        activeFixedList = TraceLinearScanWalker.addToListSortedByCurrentFromPositions(activeFixedList, cur);
                        newState = State.Active;
                    } else {
                        // sort into inactive list
                        inactiveFixedList = TraceLinearScanWalker.addToListSortedByCurrentFromPositions(inactiveFixedList, cur);
                        newState = State.Inactive;
                    }
                    if (prev == cur) {
                        // cur was re-inserted at the head of the same list
                        assert state == newState;
                        prevprev = prev;
                        prev = cur.next;
                    }
                }
                intervalMoved(cur, state, newState);
            } else {
                prevprev = prev;
                prev = cur.next;
            }
        }
    }

    /**
     * Walks up to {@code from} and updates the state of {@link TraceInterval intervals}.
     *
     * Trace intervals can switch once from {@link State#Unhandled} to {@link State#Active} and then
     * to {@link State#Handled} but handled intervals are not managed.
     */
    @SuppressWarnings("try")
    private void walkToAny(int from) {
        TraceInterval prevprev = null;
        TraceInterval prev = activeAnyList;
        TraceInterval next = prev;
        if (Debug.isLogEnabled()) {
            try (Indent i = Debug.logAndIndent("walkToAny(%d):", from)) {
                logList(next);
            }
        }
        while (next.from() <= from) {
            TraceInterval cur = next;
            next = cur.next;

            if (cur.to() <= from) {
                // remove cur from list
                if (prevprev == null) {
                    activeAnyList = next;
                } else {
                    prevprev.next = next;
                }
                intervalMoved(cur, State.Active, State.Handled);
            } else {
                prevprev = prev;
            }
            prev = next;
        }
    }

    /**
     * Get the next interval from {@linkplain #unhandledAnyList} which starts before or at
     * {@code toOpId}. The returned interval is removed.
     *
     * @postcondition all intervals in {@linkplain #unhandledAnyList} start after {@code toOpId}.
     *
     * @return The next interval or null if there is no {@linkplain #unhandledAnyList unhandled}
     *         interval at position {@code toOpId}.
     */
    private TraceInterval nextInterval(int toOpId) {
        TraceInterval any = unhandledAnyList;

        if (any != TraceInterval.EndMarker) {
            TraceInterval currentInterval = unhandledAnyList;
            if (toOpId < currentInterval.from()) {
                return null;
            }

            // unlink the head interval before returning it
            unhandledAnyList = currentInterval.next;
            currentInterval.next = TraceInterval.EndMarker;
            return currentInterval;
        }
        return null;

    }

    /**
     * Walk up to {@code toOpId}.
     *
     * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeFixedList}
     *                and {@link #inactiveFixedList} are populated.
     */
    @SuppressWarnings("try")
    private void walkTo(int toOpId) {
        assert currentPosition <= toOpId : "can not walk backwards";
        for (TraceInterval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) {
            int opId = currentInterval.from();

            // set currentPosition prior to call of walkTo
            currentPosition = opId;

            // update unhandled stack intervals
            // updateUnhandledStackIntervals(opId);

            // call walkTo even if currentPosition == id
            walkToFixed(State.Active, opId);
            walkToFixed(State.Inactive, opId);
            walkToAny(opId);

            try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) {
                if (activateCurrent(currentInterval)) {
                    activeAnyList = TraceLinearScanWalker.addToListSortedByFromPositions(activeAnyList, currentInterval);
                    intervalMoved(currentInterval, State.Unhandled, State.Active);
                }
            }
        }
        // set currentPosition prior to call of walkTo
        currentPosition = toOpId;

        if (currentPosition <= allocator.maxOpId()) {
            // update unhandled stack intervals
            // updateUnhandledStackIntervals(toOpId);

            // call walkTo if still in range
            walkToFixed(State.Active, toOpId);
            walkToFixed(State.Inactive, toOpId);
            walkToAny(toOpId);
        }
    }

    /** Logs every fixed interval in the list starting at {@code i}. */
    private static void logList(FixedInterval i) {
        for (FixedInterval interval = i; interval != FixedInterval.EndMarker; interval = interval.next) {
            Debug.log("%s", interval.logString());
        }
    }

    /** Logs every trace interval in the list starting at {@code i}. */
    private static void logList(TraceInterval i) {
        for (TraceInterval interval = i; interval != TraceInterval.EndMarker; interval = interval.next) {
            Debug.log("%s", interval.logString());
        }
    }

    private static void intervalMoved(IntervalHint interval, State from, State to) {
        // intervalMoved() is called whenever an interval moves from one interval list to another.
        // In the implementation of this method it is prohibited to move the interval to any list.
        if (Debug.isLogEnabled()) {
            Debug.log("interval moved from %s to %s: %s", from, to, interval.logString());
        }
    }
}