/*
 * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.alloc.lsra;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.asStackSlot;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import static org.graalvm.compiler.lir.LIRValueUtil.asVariable;
import static org.graalvm.compiler.lir.LIRValueUtil.isVariable;
import static org.graalvm.compiler.lir.debug.LIRGenerationDebugContext.getSourceForOperandFromDebugContext;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.EnumSet;

import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.PermanentBailoutException;
import org.graalvm.compiler.core.common.alloc.ComputeBlockOrder;
import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
import org.graalvm.compiler.core.common.util.BitMap2D;
import org.graalvm.compiler.debug.Assertions;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.debug.Indent;
import org.graalvm.compiler.lir.InstructionValueConsumer;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRInstruction.OperandFlag;
import org.graalvm.compiler.lir.LIRInstruction.OperandMode;
import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
import org.graalvm.compiler.lir.ValueConsumer;
import org.graalvm.compiler.lir.alloc.lsra.Interval.RegisterPriority;
import org.graalvm.compiler.lir.alloc.lsra.Interval.SpillState;
import org.graalvm.compiler.lir.alloc.lsra.LinearScan.BlockData;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.phases.AllocationPhase.AllocationContext;
import org.graalvm.util.EconomicSet;
import org.graalvm.util.Equivalence;

import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

public class LinearScanLifetimeAnalysisPhase extends LinearScanAllocationPhase {

    protected final LinearScan allocator;
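    /** The {@link DebugContext} of the {@link #allocator}, cached here for logging and dumping. */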
    protected final DebugContext debug;

    /**
     * @param linearScan
     */
    protected LinearScanLifetimeAnalysisPhase(LinearScan linearScan) {
        allocator = linearScan;
        debug = allocator.getDebug();
    }

    @Override
    protected void run(TargetDescription target, LIRGenerationResult lirGenRes, AllocationContext context) {
        numberInstructions();
        debug.dump(DebugContext.VERBOSE_LEVEL, lirGenRes.getLIR(), "Before register allocation");
        computeLocalLiveSets();
        computeGlobalLiveSets();
        buildIntervals(Assertions.detailedAssertionsEnabled(allocator.getOptions()));
    }

    /**
     * Bit set for each variable that is contained in each loop.
     */
    private BitMap2D intervalInLoop;

    boolean isIntervalInLoop(int interval, int loop) {
        return intervalInLoop.at(interval, loop);
    }

    /**
     * Numbers all instructions in all blocks. The numbering follows the
     * {@linkplain ComputeBlockOrder linear scan order}.
     */
    protected void numberInstructions() {

        allocator.initIntervals();

        ValueConsumer setVariableConsumer = (value, mode, flags) -> {
            if (isVariable(value)) {
                allocator.getOrCreateInterval(asVariable(value));
            }
        };

        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
        int numInstructions = 0;
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            numInstructions += allocator.getLIR().getLIRforBlock(block).size();
        }

        // initialize with correct length
        allocator.initOpIdMaps(numInstructions);

        int opId = 0;
        int index = 0;
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            allocator.initBlockData(block);

            ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);

            int numInst = instructions.size();
            for (int j = 0; j < numInst; j++) {
                LIRInstruction op = instructions.get(j);
                op.setId(opId);

                allocator.putOpIdMaps(index, op, block);
                assert allocator.instructionForId(opId) == op : "must match";

                op.visitEachTemp(setVariableConsumer);
                op.visitEachOutput(setVariableConsumer);

                index++;
                opId += 2; // numbering of lirOps by two
            }
        }
        assert index == numInstructions : "must match";
        assert (index << 1) == opId : "must match: " + (index << 1);
    }

    /**
     * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
     * separately for each block.
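     * An operand is added to {@code liveGen} if it is used before it is defined in the block, and
     * to {@code liveKill} once it is defined in the block.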
     */
    @SuppressWarnings("try")
    void computeLocalLiveSets() {
        int liveSize = allocator.liveSetSize();

        intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());

        // iterate all blocks
        for (final AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            try (Indent indent = debug.logAndIndent("compute local live sets for block %s", block)) {

                final BitSet liveGen = new BitSet(liveSize);
                final BitSet liveKill = new BitSet(liveSize);

                ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                int numInst = instructions.size();

                ValueConsumer useConsumer = (operand, mode, flags) -> {
                    if (isVariable(operand)) {
                        int operandNum = allocator.operandNumber(operand);
                        if (!liveKill.get(operandNum)) {
                            liveGen.set(operandNum);
                            if (debug.isLogEnabled()) {
                                debug.log("liveGen for operand %d(%s)", operandNum, operand);
                            }
                        }
                        if (block.getLoop() != null) {
                            intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
                        }
                    }

                    if (allocator.detailedAsserts) {
                        verifyInput(block, liveKill, operand);
                    }
                };
                ValueConsumer stateConsumer = (operand, mode, flags) -> {
                    if (LinearScan.isVariableOrRegister(operand)) {
                        int operandNum = allocator.operandNumber(operand);
                        if (!liveKill.get(operandNum)) {
                            liveGen.set(operandNum);
                            if (debug.isLogEnabled()) {
                                debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
                            }
                        }
                    }
                };
                ValueConsumer defConsumer = (operand, mode, flags) -> {
                    if (isVariable(operand)) {
                        int varNum = allocator.operandNumber(operand);
                        liveKill.set(varNum);
                        if (debug.isLogEnabled()) {
                            debug.log("liveKill for operand %d(%s)", varNum, operand);
                        }
                        if (block.getLoop() != null) {
                            intervalInLoop.setBit(varNum, block.getLoop().getIndex());
                        }
                    }

                    if (allocator.detailedAsserts) {
                        /*
                         * Fixed intervals are never live at block boundaries, so they need not be
                         * processed in live sets. Process them only in debug mode so that this can
                         * be checked.
                         */
                        verifyTemp(liveKill, operand);
                    }
                };

                // iterate all instructions of the block
                for (int j = 0; j < numInst; j++) {
                    final LIRInstruction op = instructions.get(j);

                    try (Indent indent2 = debug.logAndIndent("handle op %d: %s", op.id(), op)) {
                        op.visitEachInput(useConsumer);
                        op.visitEachAlive(useConsumer);
                        /*
                         * Add uses of live locals from interpreter's point of view for proper debug
                         * information generation.
                         */
                        op.visitEachState(stateConsumer);
                        op.visitEachTemp(defConsumer);
                        op.visitEachOutput(defConsumer);
                    }
                } // end of instruction iteration

                BlockData blockSets = allocator.getBlockData(block);
                blockSets.liveGen = liveGen;
                blockSets.liveKill = liveKill;
                blockSets.liveIn = new BitSet(liveSize);
                blockSets.liveOut = new BitSet(liveSize);

                if (debug.isLogEnabled()) {
                    debug.log("liveGen B%d %s", block.getId(), blockSets.liveGen);
                    debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
                }

            }
        } // end of block iteration
    }

    private void verifyTemp(BitSet liveKill, Value operand) {
        /*
         * Fixed intervals are never live at block boundaries, so they need not be processed in live
         * sets.
         * Process them only in debug mode so that this can be checked.
         */
        if (isRegister(operand)) {
            if (allocator.isProcessed(operand)) {
                liveKill.set(allocator.operandNumber(operand));
            }
        }
    }

    private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
        /*
         * Fixed intervals are never live at block boundaries, so they need not be processed in live
         * sets. This is checked by these assertions to be sure about it. The entry block may have
         * incoming values in registers, which is ok.
         */
        if (isRegister(operand) && block != allocator.getLIR().getControlFlowGraph().getStartBlock()) {
            if (allocator.isProcessed(operand)) {
                assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register " + asRegister(operand) + " that is not defined in this block " + block;
            }
        }
    }

    /**
     * Performs a backward dataflow analysis to compute global live sets (i.e.
     * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
     */
    @SuppressWarnings("try")
    protected void computeGlobalLiveSets() {
        try (Indent indent = debug.logAndIndent("compute global live sets")) {
            int numBlocks = allocator.blockCount();
            boolean changeOccurred;
            boolean changeOccurredInBlock;
            int iterationCount = 0;
            BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for calculations

            /*
             * Perform a backward dataflow analysis to compute liveOut and liveIn for each block.
             * The loop is executed until a fixpoint is reached (no changes in an iteration).
             */
            do {
                changeOccurred = false;

                try (Indent indent2 = debug.logAndIndent("new iteration %d", iterationCount)) {

                    // iterate all blocks in reverse order
                    for (int i = numBlocks - 1; i >= 0; i--) {
                        AbstractBlockBase<?> block = allocator.blockAt(i);
                        BlockData blockSets = allocator.getBlockData(block);

                        changeOccurredInBlock = false;

                        /*
                         * liveOut(block) is the union of liveIn(sux), for successors sux of block.
                         */
                        int n = block.getSuccessorCount();
                        if (n > 0) {
                            // block has successors
                            liveOut.clear();
                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
                                liveOut.or(allocator.getBlockData(successor).liveIn);
                            }

                            if (!blockSets.liveOut.equals(liveOut)) {
                                /*
                                 * A change occurred. Swap the old and new live out sets to avoid
                                 * copying.
                                 */
                                BitSet temp = blockSets.liveOut;
                                blockSets.liveOut = liveOut;
                                liveOut = temp;

                                changeOccurred = true;
                                changeOccurredInBlock = true;
                            }
                        }

                        if (iterationCount == 0 || changeOccurredInBlock) {
                            /*
                             * liveIn(block) is the union of liveGen(block) with (liveOut(block) &
                             * !liveKill(block)).
                             *
                             * Note: liveIn has to be computed only in the first iteration or if
                             * liveOut has changed!
                             */
                            BitSet liveIn = blockSets.liveIn;
                            liveIn.clear();
                            liveIn.or(blockSets.liveOut);
                            liveIn.andNot(blockSets.liveKill);
                            liveIn.or(blockSets.liveGen);

                            if (debug.isLogEnabled()) {
                                debug.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut);
                            }
                        }
                    }
                    iterationCount++;

                    if (changeOccurred && iterationCount > 50) {
                        /*
                         * Very unlikely, should never happen: If it happens we cannot guarantee it
                         * won't happen again.
                         */
                        throw new PermanentBailoutException("too many iterations in computeGlobalLiveSets");
                    }
                }
            } while (changeOccurred);

            if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
                verifyLiveness();
            }

            // check that the liveIn set of the first block is empty
            AbstractBlockBase<?> startBlock = allocator.getLIR().getControlFlowGraph().getStartBlock();
            if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
                if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
                    reportFailure(numBlocks);
                }
                // bailout if this occurs in product mode.
                throw new GraalError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
            }
        }
    }

    @SuppressWarnings("try")
    protected void reportFailure(int numBlocks) {
        try (DebugContext.Scope s = debug.forceLog()) {
            try (Indent indent = debug.logAndIndent("report failure")) {

                BitSet startBlockLiveIn = allocator.getBlockData(allocator.getLIR().getControlFlowGraph().getStartBlock()).liveIn;
                try (Indent indent2 = debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
                        Interval interval = allocator.intervalFor(operandNum);
                        if (interval != null) {
                            Value operand = interval.operand;
                            debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(debug, operand));
                        } else {
                            debug.log("var %d; missing operand", operandNum);
                        }
                    }
                }

                // print some additional information to simplify debugging
                for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
                    Interval interval = allocator.intervalFor(operandNum);
                    Value operand = null;
                    Object valueForOperandFromDebugContext = null;
                    if (interval != null) {
                        operand = interval.operand;
                        valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(debug, operand);
                    }
                    try (Indent indent2 = debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {

                        ArrayDeque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
                        EconomicSet<AbstractBlockBase<?>> usedIn = EconomicSet.create(Equivalence.IDENTITY);
                        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
                            if (allocator.getBlockData(block).liveGen.get(operandNum)) {
                                usedIn.add(block);
                                try (Indent indent3 = debug.logAndIndent("used in block B%d", block.getId())) {
                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
                                        try (Indent indent4 = debug.logAndIndent("%d: %s", ins.id(), ins)) {
                                            ins.forEachState((liveStateOperand, mode, flags) -> {
                                                debug.log("operand=%s", liveStateOperand);
                                                return liveStateOperand;
                                            });
                                        }
                                    }
                                }
                            }
                            if (allocator.getBlockData(block).liveKill.get(operandNum)) {
                                definedIn.add(block);
                                try (Indent indent3 = debug.logAndIndent("defined in block B%d", block.getId())) {
                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
                                        debug.log("%d: %s", ins.id(), ins);
                                    }
                                }
                            }
                        }

                        int[] hitCount = new int[numBlocks];

                        while (!definedIn.isEmpty()) {
                            AbstractBlockBase<?> block = definedIn.removeFirst();
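                            // A block reached by a definition cannot contain an offending
                            // (use-before-def) usage, so remove it from the use set.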
                            usedIn.remove(block);
                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
                                if (successor.isLoopHeader()) {
                                    if (!block.isLoopEnd()) {
                                        definedIn.add(successor);
                                    }
                                } else {
                                    if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
                                        definedIn.add(successor);
                                    }
                                }
                            }
                        }
                        try (Indent indent3 = debug.logAndIndent("**** offending usages are in: ")) {
                            for (AbstractBlockBase<?> block : usedIn) {
                                debug.log("B%d", block.getId());
                            }
                        }
                    }
                }
            }
        } catch (Throwable e) {
            throw debug.handle(e);
        }
    }

    protected void verifyLiveness() {
        /*
         * Check that fixed intervals are not live at block boundaries (live set must be empty at
         * fixed intervals).
         */
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            for (int j = 0; j <= allocator.maxRegisterNumber(); j++) {
                assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn set of fixed register must be empty";
                assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
                assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
            }
        }
    }

    protected void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, ValueKind<?> kind, boolean detailedAsserts) {
        if (!allocator.isProcessed(operand)) {
            return;
        }

        Interval interval = allocator.getOrCreateInterval(operand);
        if (!kind.equals(LIRKind.Illegal)) {
            interval.setKind(kind);
        }

        interval.addRange(from, to);

        // Register use position at even instruction id.
        interval.addUsePos(to & ~1, registerPriority, detailedAsserts);

        if (debug.isLogEnabled()) {
            debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name());
        }
    }

    protected void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, ValueKind<?> kind, boolean detailedAsserts) {
        if (!allocator.isProcessed(operand)) {
            return;
        }

        Interval interval = allocator.getOrCreateInterval(operand);
        if (!kind.equals(LIRKind.Illegal)) {
            interval.setKind(kind);
        }

        interval.addRange(tempPos, tempPos + 1);
        interval.addUsePos(tempPos, registerPriority, detailedAsserts);
        interval.addMaterializationValue(null);

        if (debug.isLogEnabled()) {
            debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
        }
    }

    protected void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, ValueKind<?> kind, boolean detailedAsserts) {
        if (!allocator.isProcessed(operand)) {
            return;
        }
        int defPos = op.id();

        Interval interval = allocator.getOrCreateInterval(operand);
        if (!kind.equals(LIRKind.Illegal)) {
            interval.setKind(kind);
        }

        Range r = interval.first();
        if (r.from <= defPos) {
            /*
             * Update the starting point (when a range is first created for a use, its start is the
             * beginning of the current block until a def is encountered).
             */
            r.from = defPos;
            interval.addUsePos(defPos, registerPriority, detailedAsserts);

        } else {
            /*
             * Dead value - make vacuous interval; also add register priority for dead intervals.
             */
            interval.addRange(defPos, defPos + 1);
            interval.addUsePos(defPos, registerPriority, detailedAsserts);
            if (debug.isLogEnabled()) {
                debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
            }
        }

        changeSpillDefinitionPos(op, operand, interval, defPos);
        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
            // detection of method-parameters and roundfp-results
            interval.setSpillState(SpillState.StartInMemory);
        }
        interval.addMaterializationValue(getMaterializedValue(op, operand, interval));

        if (debug.isLogEnabled()) {
            debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
        }
    }

    /**
     * Optimizes moves related to incoming stack-based arguments. The interval for the destination
     * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot.
     */
    protected void handleMethodArguments(LIRInstruction op) {
        if (ValueMoveOp.isValueMoveOp(op)) {
            ValueMoveOp move = ValueMoveOp.asValueMoveOp(op);
            if (optimizeMethodArgument(move.getInput())) {
                StackSlot slot = asStackSlot(move.getInput());
                if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
                    assert op.id() > 0 : "invalid id";
                    assert allocator.blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block";
                    assert isVariable(move.getResult()) : "result of move must be a variable";

                    if (debug.isLogEnabled()) {
                        debug.log("found move from stack slot %s to %s", slot, move.getResult());
                    }
                }

                Interval interval = allocator.intervalFor(move.getResult());
                interval.setSpillSlot(slot);
                interval.assignLocation(slot);
            }
        }
    }

    protected void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
        if (flags.contains(OperandFlag.HINT) && LinearScan.isVariableOrRegister(targetValue)) {

            op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
                if (LinearScan.isVariableOrRegister(registerHint)) {
                    Interval from = allocator.getOrCreateInterval((AllocatableValue) registerHint);
                    Interval to = allocator.getOrCreateInterval((AllocatableValue) targetValue);

                    /* hints always point from def to use */
                    if (hintAtDef) {
                        to.setLocationHint(from);
                    } else {
                        from.setLocationHint(to);
                    }
                    if (debug.isLogEnabled()) {
                        debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber);
                    }

                    return registerHint;
                }
                return null;
            });
        }
    }

    /**
     * Eliminates moves from register to stack if the stack slot is known to be correct.
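     * This method only records the definition position and spill state of {@code interval}; the
     * actual elimination of the spill move is left to a later phase. A second definition that is
     * not adjacent to the first one disables the optimization.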
     *
     * @param op
     * @param operand
     */
    protected void changeSpillDefinitionPos(LIRInstruction op, AllocatableValue operand, Interval interval, int defPos) {
        assert interval.isSplitParent() : "can only be called for split parents";

        switch (interval.spillState()) {
            case NoDefinitionFound:
                assert interval.spillDefinitionPos() == -1 : "must not be set before";
                interval.setSpillDefinitionPos(defPos);
                interval.setSpillState(SpillState.NoSpillStore);
                break;

            case NoSpillStore:
                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
                if (defPos < interval.spillDefinitionPos() - 2) {
                    // second definition found, so no spill optimization possible for this interval
                    interval.setSpillState(SpillState.NoOptimization);
                } else {
                    // two consecutive definitions (because of two-operand LIR form)
                    assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
                }
                break;

            case NoOptimization:
                // nothing to do
                break;

            default:
                throw GraalError.shouldNotReachHere("other states not allowed at this time");
        }
    }

    private static boolean optimizeMethodArgument(Value value) {
        /*
         * Object method arguments that are passed on the stack are currently not optimized because
         * this requires that the runtime visits method arguments during stack walking.
         */
        return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && LIRKind.isValue(value);
    }

    /**
     * Determines the register priority for an instruction's output/result operand.
     */
    protected RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
        if (ValueMoveOp.isValueMoveOp(op)) {
            ValueMoveOp move = ValueMoveOp.asValueMoveOp(op);
            if (optimizeMethodArgument(move.getInput())) {
                return RegisterPriority.None;
            }
        }

        // all other operands require a register
        return RegisterPriority.MustHaveRegister;
    }

    /**
     * Determines the priority with which an instruction's input operand will be allocated a
     * register.
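     * Operands that may also be satisfied by a stack slot ({@link OperandFlag#STACK}) only get
     * {@link RegisterPriority#ShouldHaveRegister}; all other operands require a register.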
     */
    protected static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
        if (flags.contains(OperandFlag.STACK)) {
            return RegisterPriority.ShouldHaveRegister;
        }
        // all other operands require a register
        return RegisterPriority.MustHaveRegister;
    }

    @SuppressWarnings("try")
    protected void buildIntervals(boolean detailedAsserts) {

        try (Indent indent = debug.logAndIndent("build intervals")) {
            InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
                if (LinearScan.isVariableOrRegister(operand)) {
                    addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getValueKind(), detailedAsserts);
                    addRegisterHint(op, operand, mode, flags, true);
                }
            };

            InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
                if (LinearScan.isVariableOrRegister(operand)) {
                    addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getValueKind(), detailedAsserts);
                    addRegisterHint(op, operand, mode, flags, false);
                }
            };

            InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
                if (LinearScan.isVariableOrRegister(operand)) {
                    RegisterPriority p = registerPriorityOfInputOperand(flags);
                    int opId = op.id();
                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
                    addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getValueKind(), detailedAsserts);
                    addRegisterHint(op, operand, mode, flags, false);
                }
            };

            InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
                if (LinearScan.isVariableOrRegister(operand)) {
                    int opId = op.id();
                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
                    RegisterPriority p = registerPriorityOfInputOperand(flags);
                    addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getValueKind(), detailedAsserts);
                    addRegisterHint(op, operand, mode, flags, false);
                }
            };

            InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
                if (LinearScan.isVariableOrRegister(operand)) {
                    int opId = op.id();
                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
                    addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getValueKind(), detailedAsserts);
                }
            };

            // create a list with all caller-save registers (cpu, fpu, xmm)
            RegisterArray callerSaveRegs = allocator.getRegisterAllocationConfig().getRegisterConfig().getCallerSaveRegisters();

            // iterate all blocks in reverse order
            for (int i = allocator.blockCount() - 1; i >= 0; i--) {

                AbstractBlockBase<?> block = allocator.blockAt(i);
                try (Indent indent2 = debug.logAndIndent("handle block %d", block.getId())) {

                    ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                    final int blockFrom = allocator.getFirstLirInstructionId(block);
                    int blockTo = allocator.getLastLirInstructionId(block);

                    assert blockFrom == instructions.get(0).id();
                    assert blockTo == instructions.get(instructions.size() - 1).id();

                    // Update intervals for operands live at the end of this block.
                    BitSet live = allocator.getBlockData(block).liveOut;
                    for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) {
                        assert live.get(operandNum) : "should not stop here otherwise";
                        AllocatableValue operand = allocator.intervalFor(operandNum).operand;
                        if (debug.isLogEnabled()) {
                            debug.log("live in %d: %s", operandNum, operand);
                        }

                        addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal, detailedAsserts);

                        /*
                         * Add special use positions for loop-end blocks when the interval is used
                         * anywhere inside this loop. It's possible that the block was part of a
                         * non-natural loop, so it might have an invalid loop index.
                         */
                        if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) {
                            allocator.intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd, detailedAsserts);
                        }
                    }

                    /*
                     * Iterate all instructions of the block in reverse order. Definitions of
                     * intervals are processed before uses.
                     */
                    for (int j = instructions.size() - 1; j >= 0; j--) {
                        final LIRInstruction op = instructions.get(j);
                        final int opId = op.id();

                        try (Indent indent3 = debug.logAndIndent("handle inst %d: %s", opId, op)) {

                            // add a temp range for each register if operation destroys
                            // caller-save registers
                            if (op.destroysCallerSavedRegisters()) {
                                for (Register r : callerSaveRegs) {
                                    if (allocator.attributes(r).isAllocatable()) {
                                        addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal, detailedAsserts);
                                    }
                                }
                                if (debug.isLogEnabled()) {
                                    debug.log("operation destroys all caller-save registers");
                                }
                            }

                            op.visitEachOutput(outputConsumer);
                            op.visitEachTemp(tempConsumer);
                            op.visitEachAlive(aliveConsumer);
                            op.visitEachInput(inputConsumer);

                            /*
                             * Add uses of live locals from interpreter's point of view for proper
                             * debug information generation. Treat these operands as temp values (if
                             * the live range is extended to a call site, the value would be in a
                             * register at the call otherwise).
                             */
                            op.visitEachState(stateProc);

                            // special steps for some instructions (especially moves)
                            handleMethodArguments(op);

                        }

                    } // end of instruction iteration
                }
            } // end of block iteration

            /*
             * Add the range [0, 1] to all fixed intervals. The register allocator need not handle
             * unhandled fixed intervals.
             */
            for (Interval interval : allocator.intervals()) {
                if (interval != null && isRegister(interval.operand)) {
                    interval.addRange(0, 1);
                }
            }
        }
    }

    /**
     * Returns a value for an interval definition, which can be used for re-materialization.
     *
     * @param op An instruction which defines a value
     * @param operand The destination operand of the instruction
     * @param interval The interval for this defined value.
     * @return Returns the value which is moved to the instruction and which can be reused at all
     *         reload-locations in case the interval of this instruction is spilled. Currently this
     *         can only be a {@link JavaConstant}.
     */
    protected Constant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) {
        if (LoadConstantOp.isLoadConstantOp(op)) {
            LoadConstantOp move = LoadConstantOp.asLoadConstantOp(op);

            if (!allocator.neverSpillConstants()) {
                /*
                 * Check if the interval has any uses which would accept a stack location (priority
                 * == ShouldHaveRegister).
                 * Rematerialization of such intervals can result in a degradation, because
                 * rematerialization always inserts a constant load, even if the value is not
                 * needed in a register.
                 */
                Interval.UsePosList usePosList = interval.usePosList();
                int numUsePos = usePosList.size();
                for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
                    Interval.RegisterPriority priority = usePosList.registerPriority(useIdx);
                    if (priority == Interval.RegisterPriority.ShouldHaveRegister) {
                        return null;
                    }
                }
            }
            return move.getConstant();
        }
        return null;
    }
}