5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 */ 23 package org.graalvm.compiler.lir.alloc.trace.bu; 24 25 import static org.graalvm.compiler.core.common.GraalOptions.DetailedAsserts; 26 import static org.graalvm.compiler.lir.LIRValueUtil.asVariable; 27 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 28 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue; 29 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable; 30 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue; 31 import static jdk.vm.ci.code.ValueUtil.asRegister; 32 import static jdk.vm.ci.code.ValueUtil.isIllegal; 33 import static jdk.vm.ci.code.ValueUtil.isRegister; 34 35 import java.util.ArrayList; 36 import java.util.BitSet; 37 import java.util.Collections; 38 import java.util.EnumSet; 39 import java.util.List; 40 41 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig; 42 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters; 43 import org.graalvm.compiler.core.common.alloc.Trace; 44 import 
org.graalvm.compiler.core.common.alloc.TraceBuilderResult; 45 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase; 46 import org.graalvm.compiler.debug.Debug; 47 import org.graalvm.compiler.debug.Debug.Scope; 48 import org.graalvm.compiler.debug.Indent; 49 import org.graalvm.compiler.lir.InstructionValueProcedure; 50 import org.graalvm.compiler.lir.LIR; 51 import org.graalvm.compiler.lir.LIRInstruction; 52 import org.graalvm.compiler.lir.LIRInstruction.OperandFlag; 53 import org.graalvm.compiler.lir.LIRInstruction.OperandMode; 54 import org.graalvm.compiler.lir.LIRValueUtil; 55 import org.graalvm.compiler.lir.RedundantMoveElimination; 56 import org.graalvm.compiler.lir.StandardOp; 57 import org.graalvm.compiler.lir.StandardOp.BlockEndOp; 58 import org.graalvm.compiler.lir.StandardOp.LabelOp; 59 import org.graalvm.compiler.lir.Variable; 60 import org.graalvm.compiler.lir.VirtualStackSlot; 61 import org.graalvm.compiler.lir.alloc.OutOfRegistersException; 62 import org.graalvm.compiler.lir.alloc.trace.TraceAllocationPhase; 63 import org.graalvm.compiler.lir.alloc.trace.TraceAllocationPhase.TraceAllocationContext; 64 import org.graalvm.compiler.lir.alloc.trace.TraceGlobalMoveResolutionPhase; 65 import org.graalvm.compiler.lir.alloc.trace.TraceGlobalMoveResolver; 66 import org.graalvm.compiler.lir.alloc.trace.TraceRegisterAllocationPhase; 67 import org.graalvm.compiler.lir.gen.LIRGenerationResult; 68 import org.graalvm.compiler.lir.gen.LIRGeneratorTool.MoveFactory; 69 import org.graalvm.compiler.lir.ssa.SSAUtil; 70 import org.graalvm.compiler.lir.ssa.SSAUtil.PhiValueVisitor; 71 import org.graalvm.compiler.lir.ssi.SSIUtil; 72 73 import jdk.vm.ci.code.Register; 74 import jdk.vm.ci.code.RegisterArray; 75 import jdk.vm.ci.code.RegisterAttributes; 76 import jdk.vm.ci.code.RegisterValue; 77 import jdk.vm.ci.code.TargetDescription; 78 import jdk.vm.ci.common.JVMCIError; 79 import jdk.vm.ci.meta.AllocatableValue; 80 import jdk.vm.ci.meta.PlatformKind; 81 import 
jdk.vm.ci.meta.Value; 82 83 /** 84 * Allocates registers within a trace in a greedy, bottom-up fashion. The liveness information is 85 * computed on the fly as the instructions are traversed instead of computing it in a separate pass. 86 * The goal of this allocator is to provide a simple and fast algorithm for situations where code 87 * quality is not the primary target. 88 * 89 * This implementation does not (yet) exploit hinting information and might introduce multiple spill 90 * moves to the same stack slot (which are likely to be removed by {@link RedundantMoveElimination}). 91 98 private final LIRGenerationResult lirGenRes; 99 private final MoveFactory spillMoveFactory; 100 private final RegisterAllocationConfig registerAllocationConfig; 101 private final RegisterArray callerSaveRegs; 102 private final RegisterAttributes[] registerAttributes; 103 private final BitSet allocatedBlocks; 104 private final TraceBuilderResult resultTraces; 105 private final TraceGlobalMoveResolver moveResolver; 106 107 /** 108 * Maps from {@link Variable#index} to a spill stack slot. If 109 * {@linkplain org.graalvm.compiler.lir.alloc.trace.TraceRegisterAllocationPhase.Options#TraceRACacheStackSlots 110 * enabled} a {@link Variable} is always assigned to the same stack slot. 
111 */ 112 private final AllocatableValue[] stackSlots; 113 114 private final ArrayList<LIRInstruction> insertInstructionsBefore; 115 private final ArrayList<LIRInstruction> insertInstructionsAfter; 116 private final boolean neverSpillConstants; 117 118 public BottomUpAllocator(TargetDescription target, LIRGenerationResult lirGenRes, MoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig, 119 AllocatableValue[] cachedStackSlots, TraceBuilderResult resultTraces, boolean neverSpillConstant) { 120 this.target = target; 121 this.lirGenRes = lirGenRes; 122 this.spillMoveFactory = spillMoveFactory; 123 this.registerAllocationConfig = registerAllocationConfig; 124 this.callerSaveRegs = registerAllocationConfig.getRegisterConfig().getCallerSaveRegisters(); 125 this.registerAttributes = registerAllocationConfig.getRegisterConfig().getAttributesMap(); 126 this.allocatedBlocks = new BitSet(lirGenRes.getLIR().getControlFlowGraph().getBlocks().length); 127 this.resultTraces = resultTraces; 128 this.moveResolver = new TraceGlobalMoveResolver(lirGenRes, spillMoveFactory, target.arch); 129 this.neverSpillConstants = neverSpillConstant; 130 131 this.insertInstructionsBefore = new ArrayList<>(4); 132 this.insertInstructionsAfter = new ArrayList<>(4); 133 134 if (TraceRegisterAllocationPhase.Options.TraceRACacheStackSlots.getValue()) { 135 this.stackSlots = cachedStackSlots; 136 } else { 137 this.stackSlots = new AllocatableValue[lirGenRes.getLIR().numVariables()]; 138 } 139 140 } 141 142 private LIR getLIR() { 143 return lirGenRes.getLIR(); 144 } 145 146 /** 147 * Gets an object describing the attributes of a given register according to this register 148 * configuration. 149 */ 150 private RegisterAttributes attributes(Register reg) { 151 return registerAttributes[reg.number]; 152 } 153 154 /** 155 * Returns a new spill slot or a cached entry if there is already one for the variable. 
156 */ 157 private AllocatableValue allocateSpillSlot(Variable var) { 158 int variableIndex = var.index; 159 AllocatableValue cachedStackSlot = stackSlots[variableIndex]; 160 if (cachedStackSlot != null) { 161 TraceRegisterAllocationPhase.globalStackSlots.increment(); 162 assert cachedStackSlot.getValueKind().equals(var.getValueKind()) : "CachedStackSlot: kind mismatch? " + var.getValueKind() + " vs. " + cachedStackSlot.getValueKind(); 163 return cachedStackSlot; 164 } 165 VirtualStackSlot slot = lirGenRes.getFrameMapBuilder().allocateSpillSlot(var.getValueKind()); 166 stackSlots[variableIndex] = slot; 167 TraceRegisterAllocationPhase.allocatedStackSlots.increment(); 168 return slot; 169 } 170 171 private final PhiValueVisitor resolveLoopBackEdgeVisitor = (Value in, Value out) -> { 172 resolveBackEdge(in, out); 173 }; 174 175 private void resolveBackEdge(Value in, Value out) { 176 if (!isIllegal(in) && !TraceGlobalMoveResolver.isMoveToSelf(out, in)) { 177 TraceGlobalMoveResolutionPhase.addMapping(moveResolver, out, in); 178 } 179 } 180 181 @Override 182 protected void run(@SuppressWarnings("hiding") TargetDescription target, @SuppressWarnings("hiding") LIRGenerationResult lirGenRes, Trace trace, TraceAllocationContext context) { 183 allocate(trace); 184 } 185 186 private void allocate(Trace trace) { 187 if (neverSpillConstants) { 188 throw JVMCIError.unimplemented("NeverSpillConstant not supported!"); 189 } 190 new Allocator().allocateTrace(trace); 191 assert verify(trace); 192 } 193 194 private boolean verify(Trace trace) { 195 for (AbstractBlockBase<?> block : trace.getBlocks()) { 196 assert LIR.verifyBlock(lirGenRes.getLIR(), block); 197 } 198 return true; 199 } 200 201 private static boolean requiresRegisters(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 202 assert isVariable(value) : "Not a variable " + value; 203 if (instruction instanceof LabelOp) { 204 // phi and incoming values do not require a register 205 return 
false; 206 } 207 if (mode == OperandMode.DEF || mode == OperandMode.TEMP) { 208 return true; 209 } 210 return !flags.contains(OperandFlag.STACK); 211 } 212 213 private void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock) { 214 LIR lir = lirGenRes.getLIR(); 215 if (fromBlock.getSuccessorCount() <= 1) { 216 if (Debug.isLogEnabled()) { 217 Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); 218 } 219 220 List<LIRInstruction> instructions = lir.getLIRforBlock(fromBlock); 221 LIRInstruction instr = instructions.get(instructions.size() - 1); 222 if (instr instanceof StandardOp.JumpOp) { 223 // insert moves before branch 224 moveResolver.setInsertPosition(instructions, instructions.size() - 1); 225 } else { 226 moveResolver.setInsertPosition(instructions, instructions.size()); 227 } 228 229 } else { 230 if (Debug.isLogEnabled()) { 231 Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); 232 } 233 234 if (DetailedAsserts.getValue()) { 235 assert lir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; 236 237 /* 238 * Because the number of predecessor edges matches the number of successor edges, 239 * blocks which are reached by switch statements may have more than one 240 * predecessor but it is guaranteed that all predecessors will be the same. 
241 */ 242 for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) { 243 assert fromBlock == predecessor : "all critical edges must be broken"; 244 } 245 } 246 247 moveResolver.setInsertPosition(lir.getLIRforBlock(toBlock), 1); 248 } 249 } 250 251 private final class Allocator { 252 253 /** 254 * Maps from {@linkplain Register#number register} to the current {@linkplain Variable 310 } 311 312 private AllocatableValue getCurrentLocation(Variable var) { 313 return locations[var.index]; 314 } 315 316 private void insertSpillMoveBefore(AllocatableValue dst, Value src) { 317 LIRInstruction move = spillMoveFactory.createMove(dst, src); 318 insertInstructionsBefore.add(move); 319 Debug.log("insert before %s", move); 320 } 321 322 private void insertSpillMoveAfter(AllocatableValue dst, Value src) { 323 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 324 if (!(inst instanceof BlockEndOp)) { 325 LIRInstruction move = spillMoveFactory.createMove(dst, src); 326 insertInstructionsAfter.add(move); 327 Debug.log("insert after %s", move); 328 } else { 329 Debug.log("Block end op. 
No from %s to %s necessary.", src, dst); 330 } 331 } 332 333 private void insertInstructions() { 334 // TODO (je) this is can probably be improved 335 currentInstructions.ensureCapacity(currentInstructions.size() + insertInstructionsBefore.size() + insertInstructionsAfter.size()); 336 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 337 // insert after 338 if (insertInstructionsAfter.size() != 0) { 339 Collections.reverse(insertInstructionsAfter); 340 assert !(inst instanceof BlockEndOp) : "Cannot insert instruction after the block end op: " + inst; 341 currentInstructions.addAll(currentInstructionIndex + 1, insertInstructionsAfter); 342 insertInstructionsAfter.clear(); 343 } 344 // insert before 345 if (insertInstructionsBefore.size() != 0) { 346 assert !(inst instanceof LabelOp) : "Cannot insert instruction before the label op: " + inst; 347 currentInstructions.addAll(currentInstructionIndex, insertInstructionsBefore); 348 insertInstructionsBefore.clear(); 349 } 350 } 351 352 @SuppressWarnings("try") 353 private void allocateTrace(Trace trace) { 354 try (Scope s = Debug.scope("BottomUpAllocator", trace.getBlocks()); Indent indent = Debug.logAndIndent("%s (Trace%d)", trace, trace.getId())) { 355 AbstractBlockBase<?> successorBlock = null; 356 for (int i = trace.getBlocks().length - 1; i >= 0; i--) { 357 AbstractBlockBase<?> block = trace.getBlocks()[i]; 358 // handle PHIs 359 if (successorBlock != null) { 360 resolvePhis(successorBlock, block); 361 } 362 allocateBlock(block); 363 successorBlock = block; 364 } 365 resolveLocalDataFlow(trace); 366 } catch (Throwable e) { 367 throw Debug.handle(e); 368 } 369 } 370 371 /** 372 * Resolve phi values, i.e. set the current location of values in the predecessors block 373 * (which is not yet allocated) to the location of the variable defined by the phi in the 374 * successor (which is already allocated). For constant inputs we insert moves. 
375 */ 376 private void resolvePhis(AbstractBlockBase<?> successorBlock, AbstractBlockBase<?> block) { 377 // Note that we are only visiting PHI values, not transient SSI values. 378 phiVisitor.loads.clear(); 379 SSAUtil.forEachPhiValuePair(getLIR(), successorBlock, block, phiVisitor); 380 if (phiVisitor.loads.size() > 0) { 381 ArrayList<LIRInstruction> instructions = getLIR().getLIRforBlock(block); 382 instructions.addAll(instructions.size() - 1, phiVisitor.loads); 383 } 384 } 385 386 private final PhiVisitor phiVisitor = new PhiVisitor(); 387 388 private final class PhiVisitor implements PhiValueVisitor { 389 390 private final ArrayList<LIRInstruction> loads = new ArrayList<>(); 391 392 @Override 393 public void visit(Value phiIn, Value phiOut) { 394 assert isStackSlotValue(phiIn) || isRegister(phiIn) : "PHI defined values is not a register or stack slot: " + phiIn; 395 AllocatableValue in = asAllocatableValue(phiIn); 396 397 AllocatableValue dest = isRegister(in) ? getCurrentValue(asRegister(in)) : in; 398 final LIRInstruction load; 399 if (isConstantValue(phiOut)) { 400 // insert move from constant 401 load = spillMoveFactory.createLoad(dest, LIRValueUtil.asConstant(phiOut)); 402 } else { 403 assert isVariable(phiOut) : "Not a variable or constant: " + phiOut; 404 // insert move from variable 405 load = spillMoveFactory.createMove(dest, asVariable(phiOut)); 406 } 407 Debug.log("Inserting load %s", load); 408 loads.add(load); 409 return; 410 } 411 } 412 413 private void resolveLocalDataFlow(Trace trace) { 414 for (AbstractBlockBase<?> block : trace.getBlocks()) { 415 for (AbstractBlockBase<?> pred : block.getPredecessors()) { 416 if (resultTraces.getTraceForBlock(pred).equals(trace)) { 417 resolveFindInsertPos(pred, block); 418 SSIUtil.forEachValuePair(getLIR(), block, pred, resolveLoopBackEdgeVisitor); 419 moveResolver.resolveAndAppendMoves(); 420 } 421 } 422 } 423 } 424 425 @SuppressWarnings("try") 426 private void allocateBlock(AbstractBlockBase<?> block) { 
427 try (Indent indent = Debug.logAndIndent("handle block %s", block)) { 428 currentInstructions = getLIR().getLIRforBlock(block); 429 for (currentInstructionIndex = currentInstructions.size() - 1; currentInstructionIndex >= 0; currentInstructionIndex--) { 430 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 431 if (inst != null) { 432 inst.setId(currentOpId); 433 allocateInstruction(inst); 434 } 435 } 436 allocatedBlocks.set(block.getId()); 437 } 438 } 439 440 @SuppressWarnings("try") 441 private void allocateInstruction(LIRInstruction op) { 442 assert op != null && op.id() == currentOpId; 443 try (Indent indent = Debug.logAndIndent("handle inst: %d: %s", op.id(), op)) { 444 try (Indent indent1 = Debug.logAndIndent("output pos")) { 445 // spill caller saved registers 446 if (op.destroysCallerSavedRegisters()) { 447 spillCallerSavedRegisters(); 448 } 449 450 // fixed 451 op.forEachOutput(allocFixedRegisterProcedure); 452 op.forEachTemp(allocFixedRegisterProcedure); 453 op.forEachAlive(allocFixedRegisterProcedure); 454 // variable 455 op.forEachOutput(allocRegisterProcedure); 456 op.forEachTemp(allocRegisterProcedure); 457 op.forEachAlive(allocRegisterProcedure); 458 /* state do never require a register */ 459 // op.forEachState(allocRegisterProcedure); 460 461 // should have 462 op.forEachTemp(allocStackOrRegisterProcedure); 463 op.forEachOutput(allocStackOrRegisterProcedure); 464 } 465 try (Indent indent1 = Debug.logAndIndent("input pos")) { 466 467 currentOpId++; 468 469 // fixed 470 op.forEachInput(allocFixedRegisterProcedure); 471 // variable 472 op.forEachInput(allocRegisterProcedure); 473 474 op.forEachAlive(allocStackOrRegisterProcedure); 475 op.forEachState(allocStackOrRegisterProcedure); 476 op.forEachInput(allocStackOrRegisterProcedure); 477 } 478 479 // insert spill/load instructions 480 insertInstructions(); 481 currentOpId++; 482 } 483 } 484 485 private void spillCallerSavedRegisters() { 486 for (Register reg : callerSaveRegs) { 
487 if (attributes(reg).isAllocatable()) { 488 evacuateRegisterAndSpill(reg); 489 assert checkRegisterUsage(reg); 490 evacuateRegisterAndSpill(reg); 491 // setCurrentValue(reg, reg.asValue()); 492 setLastRegisterUsage(reg, currentOpId); 493 } 494 } 495 if (Debug.isLogEnabled()) { 496 Debug.log("operation destroys all caller-save registers"); 497 } 498 } 499 500 private final InstructionValueProcedure allocFixedRegisterProcedure = new InstructionValueProcedure() { 501 @Override 502 public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 503 return allocFixedRegister(value); 504 } 505 }; 506 private final InstructionValueProcedure allocRegisterProcedure = new InstructionValueProcedure() { 507 @Override 508 public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 509 return allocRegister(instruction, value, mode, flags); 510 } 511 }; | 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 package org.graalvm.compiler.lir.alloc.trace.bu; 24 25 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue; 26 import static jdk.vm.ci.code.ValueUtil.asRegister; 27 import static jdk.vm.ci.code.ValueUtil.isIllegal; 28 import static jdk.vm.ci.code.ValueUtil.isRegister; 29 import static org.graalvm.compiler.core.common.GraalOptions.DetailedAsserts; 30 import static org.graalvm.compiler.lir.LIRValueUtil.asVariable; 31 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 32 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue; 33 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable; 34 35 import java.util.ArrayList; 36 import java.util.BitSet; 37 import java.util.Collections; 38 import java.util.EnumSet; 39 40 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig; 41 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters; 42 import org.graalvm.compiler.core.common.alloc.Trace; 43 import org.graalvm.compiler.core.common.alloc.TraceBuilderResult; 44 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase; 45 import org.graalvm.compiler.debug.Debug; 46 import org.graalvm.compiler.debug.Debug.Scope; 47 import org.graalvm.compiler.debug.Indent; 48 import org.graalvm.compiler.lir.InstructionValueProcedure; 49 import org.graalvm.compiler.lir.LIR; 50 import org.graalvm.compiler.lir.LIRInstruction; 51 import org.graalvm.compiler.lir.LIRInstruction.OperandFlag; 52 import org.graalvm.compiler.lir.LIRInstruction.OperandMode; 53 import org.graalvm.compiler.lir.LIRValueUtil; 54 import org.graalvm.compiler.lir.RedundantMoveElimination; 55 import org.graalvm.compiler.lir.StandardOp; 56 import org.graalvm.compiler.lir.StandardOp.BlockEndOp; 57 import org.graalvm.compiler.lir.StandardOp.JumpOp; 58 import org.graalvm.compiler.lir.StandardOp.LabelOp; 59 import org.graalvm.compiler.lir.Variable; 60 import org.graalvm.compiler.lir.VirtualStackSlot; 61 import 
org.graalvm.compiler.lir.alloc.OutOfRegistersException; 62 import org.graalvm.compiler.lir.alloc.trace.GlobalLivenessInfo; 63 import org.graalvm.compiler.lir.alloc.trace.TraceAllocationPhase; 64 import org.graalvm.compiler.lir.alloc.trace.TraceAllocationPhase.TraceAllocationContext; 65 import org.graalvm.compiler.lir.alloc.trace.TraceGlobalMoveResolutionPhase; 66 import org.graalvm.compiler.lir.alloc.trace.TraceGlobalMoveResolver; 67 import org.graalvm.compiler.lir.alloc.trace.TraceRegisterAllocationPhase; 68 import org.graalvm.compiler.lir.gen.LIRGenerationResult; 69 import org.graalvm.compiler.lir.gen.LIRGeneratorTool.MoveFactory; 70 import org.graalvm.compiler.lir.ssa.SSAUtil; 71 72 import jdk.vm.ci.code.Register; 73 import jdk.vm.ci.code.RegisterArray; 74 import jdk.vm.ci.code.RegisterAttributes; 75 import jdk.vm.ci.code.RegisterValue; 76 import jdk.vm.ci.code.TargetDescription; 77 import jdk.vm.ci.common.JVMCIError; 78 import jdk.vm.ci.meta.AllocatableValue; 79 import jdk.vm.ci.meta.PlatformKind; 80 import jdk.vm.ci.meta.Value; 81 82 /** 83 * Allocates registers within a trace in a greedy, bottom-up fashion. The liveness information is 84 * computed on the fly as the instructions are traversed instead of computing it in a separate pass. 85 * The goal of this allocator is to provide a simple and fast algorithm for situations where code 86 * quality is not the primary target. 87 * 88 * This implementation does not (yet) exploit hinting information and might introduce multiple spill 89 * moves to the same stack slot (which are likely to be removed by {@link RedundantMoveElimination}). 
90 * 97 private final LIRGenerationResult lirGenRes; 98 private final MoveFactory spillMoveFactory; 99 private final RegisterAllocationConfig registerAllocationConfig; 100 private final RegisterArray callerSaveRegs; 101 private final RegisterAttributes[] registerAttributes; 102 private final BitSet allocatedBlocks; 103 private final TraceBuilderResult resultTraces; 104 private final TraceGlobalMoveResolver moveResolver; 105 106 /** 107 * Maps from {@link Variable#index} to a spill stack slot. If 108 * {@linkplain org.graalvm.compiler.lir.alloc.trace.TraceRegisterAllocationPhase.Options#TraceRACacheStackSlots 109 * enabled} a {@link Variable} is always assigned to the same stack slot. 110 */ 111 private final AllocatableValue[] stackSlots; 112 113 private final ArrayList<LIRInstruction> insertInstructionsBefore; 114 private final ArrayList<LIRInstruction> insertInstructionsAfter; 115 private final boolean neverSpillConstants; 116 117 private final GlobalLivenessInfo livenessInfo; 118 119 public BottomUpAllocator(TargetDescription target, LIRGenerationResult lirGenRes, MoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig, 120 AllocatableValue[] cachedStackSlots, TraceBuilderResult resultTraces, boolean neverSpillConstant, GlobalLivenessInfo livenessInfo) { 121 this.target = target; 122 this.lirGenRes = lirGenRes; 123 this.spillMoveFactory = spillMoveFactory; 124 this.registerAllocationConfig = registerAllocationConfig; 125 this.callerSaveRegs = registerAllocationConfig.getRegisterConfig().getCallerSaveRegisters(); 126 this.registerAttributes = registerAllocationConfig.getRegisterConfig().getAttributesMap(); 127 this.allocatedBlocks = new BitSet(lirGenRes.getLIR().getControlFlowGraph().getBlocks().length); 128 this.resultTraces = resultTraces; 129 this.moveResolver = new TraceGlobalMoveResolver(lirGenRes, spillMoveFactory, target.arch); 130 this.neverSpillConstants = neverSpillConstant; 131 this.livenessInfo = livenessInfo; 132 133 
this.insertInstructionsBefore = new ArrayList<>(4); 134 this.insertInstructionsAfter = new ArrayList<>(4); 135 136 if (TraceRegisterAllocationPhase.Options.TraceRACacheStackSlots.getValue(lirGenRes.getLIR().getOptions())) { 137 this.stackSlots = cachedStackSlots; 138 } else { 139 this.stackSlots = new AllocatableValue[lirGenRes.getLIR().numVariables()]; 140 } 141 142 } 143 144 private LIR getLIR() { 145 return lirGenRes.getLIR(); 146 } 147 148 /** 149 * Gets an object describing the attributes of a given register according to this register 150 * configuration. 151 */ 152 private RegisterAttributes attributes(Register reg) { 153 return registerAttributes[reg.number]; 154 } 155 156 /** 157 * Returns a new spill slot or a cached entry if there is already one for the variable. 158 */ 159 private AllocatableValue allocateSpillSlot(Variable var) { 160 int variableIndex = var.index; 161 AllocatableValue cachedStackSlot = stackSlots[variableIndex]; 162 if (cachedStackSlot != null) { 163 TraceRegisterAllocationPhase.globalStackSlots.increment(); 164 assert cachedStackSlot.getValueKind().equals(var.getValueKind()) : "CachedStackSlot: kind mismatch? " + var.getValueKind() + " vs. 
" + cachedStackSlot.getValueKind(); 165 return cachedStackSlot; 166 } 167 VirtualStackSlot slot = lirGenRes.getFrameMapBuilder().allocateSpillSlot(var.getValueKind()); 168 stackSlots[variableIndex] = slot; 169 TraceRegisterAllocationPhase.allocatedStackSlots.increment(); 170 return slot; 171 } 172 173 @Override 174 protected void run(@SuppressWarnings("hiding") TargetDescription target, @SuppressWarnings("hiding") LIRGenerationResult lirGenRes, Trace trace, TraceAllocationContext context) { 175 allocate(trace); 176 } 177 178 private void allocate(Trace trace) { 179 if (neverSpillConstants) { 180 throw JVMCIError.unimplemented("NeverSpillConstant not supported!"); 181 } 182 new Allocator().allocateTrace(trace); 183 assert verify(trace); 184 } 185 186 private boolean verify(Trace trace) { 187 for (AbstractBlockBase<?> block : trace.getBlocks()) { 188 assert LIR.verifyBlock(lirGenRes.getLIR(), block); 189 } 190 return true; 191 } 192 193 private static boolean requiresRegisters(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 194 assert isVariable(value) : "Not a variable " + value; 195 if (instruction instanceof LabelOp) { 196 // phi and incoming values do not require a register 197 return false; 198 } 199 if (mode == OperandMode.DEF || mode == OperandMode.TEMP) { 200 return true; 201 } 202 return !flags.contains(OperandFlag.STACK); 203 } 204 205 private void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock) { 206 LIR lir = lirGenRes.getLIR(); 207 if (fromBlock.getSuccessorCount() <= 1) { 208 if (Debug.isLogEnabled()) { 209 Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); 210 } 211 212 ArrayList<LIRInstruction> instructions = lir.getLIRforBlock(fromBlock); 213 LIRInstruction instr = instructions.get(instructions.size() - 1); 214 if (instr instanceof StandardOp.JumpOp) { 215 // insert moves before branch 216 moveResolver.setInsertPosition(instructions, instructions.size() 
- 1); 217 } else { 218 moveResolver.setInsertPosition(instructions, instructions.size()); 219 } 220 221 } else { 222 if (Debug.isLogEnabled()) { 223 Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); 224 } 225 226 if (DetailedAsserts.getValue(getLIR().getOptions())) { 227 assert lir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; 228 229 /* 230 * Because the number of predecessor edges matches the number of successor edges, 231 * blocks which are reached by switch statements may have more than one 232 * predecessor but it is guaranteed that all predecessors will be the same. 233 */ 234 for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) { 235 assert fromBlock == predecessor : "all critical edges must be broken"; 236 } 237 } 238 239 moveResolver.setInsertPosition(lir.getLIRforBlock(toBlock), 1); 240 } 241 } 242 243 private final class Allocator { 244 245 /** 246 * Maps from {@linkplain Register#number register} to the current {@linkplain Variable 302 } 303 304 private AllocatableValue getCurrentLocation(Variable var) { 305 return locations[var.index]; 306 } 307 308 private void insertSpillMoveBefore(AllocatableValue dst, Value src) { 309 LIRInstruction move = spillMoveFactory.createMove(dst, src); 310 insertInstructionsBefore.add(move); 311 Debug.log("insert before %s", move); 312 } 313 314 private void insertSpillMoveAfter(AllocatableValue dst, Value src) { 315 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 316 if (!(inst instanceof BlockEndOp)) { 317 LIRInstruction move = spillMoveFactory.createMove(dst, src); 318 insertInstructionsAfter.add(move); 319 Debug.log("insert after %s", move); 320 } else { 321 Debug.log("Block end op. 
No from %s to %s necessary.", src, dst); 322 requireResolution = true; 323 } 324 } 325 326 private void insertInstructions() { 327 // TODO (je) this is can probably be improved 328 currentInstructions.ensureCapacity(currentInstructions.size() + insertInstructionsBefore.size() + insertInstructionsAfter.size()); 329 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 330 // insert after 331 if (insertInstructionsAfter.size() != 0) { 332 Collections.reverse(insertInstructionsAfter); 333 assert !(inst instanceof BlockEndOp) : "Cannot insert instruction after the block end op: " + inst; 334 currentInstructions.addAll(currentInstructionIndex + 1, insertInstructionsAfter); 335 insertInstructionsAfter.clear(); 336 } 337 // insert before 338 if (insertInstructionsBefore.size() != 0) { 339 assert !(inst instanceof LabelOp) : "Cannot insert instruction before the label op: " + inst; 340 currentInstructions.addAll(currentInstructionIndex, insertInstructionsBefore); 341 insertInstructionsBefore.clear(); 342 } 343 } 344 345 @SuppressWarnings("try") 346 private void allocateTrace(Trace trace) { 347 try (Scope s = Debug.scope("BottomUpAllocator", trace.getBlocks()); Indent indent = Debug.logAndIndent("%s (Trace%d)", trace, trace.getId())) { 348 AbstractBlockBase<?>[] blocks = trace.getBlocks(); 349 int lastBlockIdx = blocks.length - 1; 350 AbstractBlockBase<?> successorBlock = blocks[lastBlockIdx]; 351 // handle last block 352 allocateBlock(successorBlock); 353 // handle remaining blocks 354 for (int i = lastBlockIdx - 1; i >= 0; i--) { 355 AbstractBlockBase<?> block = blocks[i]; 356 // handle PHIs 357 resolvePhis(block, successorBlock); 358 boolean needResolution = allocateBlock(block); 359 360 if (needResolution) { 361 // resolve local data flow 362 resolveIntraTraceEdge(block, successorBlock); 363 } 364 successorBlock = block; 365 } 366 resolveLoopBackEdge(trace); 367 } catch (Throwable e) { 368 throw Debug.handle(e); 369 } 370 } 371 372 private final 
ArrayList<LIRInstruction> phiResolutionMoves = new ArrayList<>(); 373 374 /** 375 * Resolve phi values, i.e., set the current location of values in the predecessors block 376 * (which is not yet allocated) to the location of the variable defined by the phi in the 377 * successor (which is already allocated). For constant inputs we insert moves. 378 */ 379 private void resolvePhis(AbstractBlockBase<?> from, AbstractBlockBase<?> to) { 380 if (SSAUtil.isMerge(to)) { 381 JumpOp jump = SSAUtil.phiOut(getLIR(), from); 382 LabelOp label = SSAUtil.phiIn(getLIR(), to); 383 384 assert phiResolutionMoves.isEmpty(); 385 386 for (int i = 0; i < label.getPhiSize(); i++) { 387 visitPhiValuePair(jump.getOutgoingValue(i), label.getIncomingValue(i)); 388 } 389 if (!phiResolutionMoves.isEmpty()) { 390 ArrayList<LIRInstruction> instructions = getLIR().getLIRforBlock(from); 391 instructions.addAll(instructions.size() - 1, phiResolutionMoves); 392 phiResolutionMoves.clear(); 393 } 394 } 395 } 396 397 private void visitPhiValuePair(Value phiOut, Value phiIn) { 398 assert isStackSlotValue(phiIn) || isRegister(phiIn) : "PHI defined values is not a register or stack slot: " + phiIn; 399 AllocatableValue in = asAllocatableValue(phiIn); 400 401 AllocatableValue dest = isRegister(in) ? 
getCurrentValue(asRegister(in)) : in; 402 final LIRInstruction move; 403 if (isConstantValue(phiOut)) { 404 // insert move from constant 405 move = spillMoveFactory.createLoad(dest, LIRValueUtil.asConstant(phiOut)); 406 } else { 407 assert isVariable(phiOut) : "Not a variable or constant: " + phiOut; 408 // insert move from variable 409 move = spillMoveFactory.createMove(dest, asVariable(phiOut)); 410 } 411 Debug.log("Inserting load %s", move); 412 phiResolutionMoves.add(move); 413 } 414 415 private boolean requireResolution; 416 417 /** 418 * Intra-trace edges, i.e., edge where both, the source and the target block are in the same 419 * trace, are either 420 * <ul> 421 * <li><em>immediate forward edges</em>, i.e., an edge from {@code i}th block of the trace 422 * to the {@code (i+1)}th block, or 423 * <li>a <em>loop back-edge</em> from the last block of the trace to the loop header. 424 * </ul> 425 * This property is guaranteed due to splitting of <em>critical edge</em>. 426 * 427 * Since forward edges are handled locally during bottom-up allocation we only need to check 428 * for the second case. 429 */ 430 private void resolveLoopBackEdge(Trace trace) { 431 AbstractBlockBase<?>[] blocks = trace.getBlocks(); 432 AbstractBlockBase<?> endBlock = blocks[blocks.length - 1]; 433 if (endBlock.isLoopEnd()) { 434 assert endBlock.getSuccessorCount() == 1; 435 AbstractBlockBase<?> targetBlock = endBlock.getSuccessors()[0]; 436 assert targetBlock.isLoopHeader() : String.format("Successor %s or loop end %s is not a loop header?", targetBlock, endBlock); 437 if (resultTraces.getTraceForBlock(targetBlock).equals(trace)) { 438 resolveLoopBackEdge(endBlock, targetBlock); 439 } 440 } 441 } 442 443 private void resolveLoopBackEdge(AbstractBlockBase<?> from, AbstractBlockBase<?> to) { 444 assert resultTraces.getTraceForBlock(from).equals(resultTraces.getTraceForBlock(to)) : "Not on the same trace? 
" + from + " -> " + to; 445 resolveFindInsertPos(from, to); 446 LIR lir = getLIR(); 447 448 if (SSAUtil.isMerge(to)) { 449 JumpOp blockEnd = SSAUtil.phiOut(lir, from); 450 LabelOp label = SSAUtil.phiIn(lir, to); 451 452 for (int i = 0; i < label.getPhiSize(); i++) { 453 Value incomingValue = label.getIncomingValue(i); 454 Value outgoingValue = blockEnd.getOutgoingValue(i); 455 resolveValuePair(incomingValue, outgoingValue); 456 } 457 } 458 resolveTraceEdge(from, to); 459 moveResolver.resolveAndAppendMoves(); 460 } 461 462 private void resolveIntraTraceEdge(AbstractBlockBase<?> from, AbstractBlockBase<?> to) { 463 assert resultTraces.getTraceForBlock(from).equals(resultTraces.getTraceForBlock(to)) : "Not on the same trace? " + from + " -> " + to; 464 resolveFindInsertPos(from, to); 465 resolveTraceEdge(from, to); 466 moveResolver.resolveAndAppendMoves(); 467 } 468 469 private void resolveTraceEdge(AbstractBlockBase<?> from, AbstractBlockBase<?> to) { 470 Value[] out = livenessInfo.getOutLocation(from); 471 Value[] in = livenessInfo.getInLocation(to); 472 473 assert out != null; 474 assert in != null; 475 assert out.length == in.length; 476 477 for (int i = 0; i < out.length; i++) { 478 Value incomingValue = in[i]; 479 Value outgoingValue = out[i]; 480 resolveValuePair(incomingValue, outgoingValue); 481 } 482 } 483 484 private void resolveValuePair(Value incomingValue, Value outgoingValue) { 485 if (!isIllegal(incomingValue) && !TraceGlobalMoveResolver.isMoveToSelf(outgoingValue, incomingValue)) { 486 TraceGlobalMoveResolutionPhase.addMapping(moveResolver, outgoingValue, incomingValue); 487 } 488 } 489 490 /** 491 * @return {@code true} if the block requires data-flow resolution. 
492 */ 493 @SuppressWarnings("try") 494 private boolean allocateBlock(AbstractBlockBase<?> block) { 495 // might be set in insertSpillMoveAfter 496 requireResolution = false; 497 498 try (Indent indent = Debug.logAndIndent("handle block %s", block)) { 499 currentInstructions = getLIR().getLIRforBlock(block); 500 for (currentInstructionIndex = currentInstructions.size() - 1; currentInstructionIndex >= 0; currentInstructionIndex--) { 501 LIRInstruction inst = currentInstructions.get(currentInstructionIndex); 502 if (inst != null) { 503 inst.setId(currentOpId); 504 allocateInstruction(inst, block); 505 } 506 } 507 allocatedBlocks.set(block.getId()); 508 } 509 return requireResolution; 510 } 511 512 @SuppressWarnings("try") 513 private void allocateInstruction(LIRInstruction op, AbstractBlockBase<?> block) { 514 assert op != null && op.id() == currentOpId; 515 try (Indent indent = Debug.logAndIndent("handle inst: %d: %s", op.id(), op)) { 516 try (Indent indent1 = Debug.logAndIndent("output pos")) { 517 // spill caller saved registers 518 if (op.destroysCallerSavedRegisters()) { 519 spillCallerSavedRegisters(); 520 } 521 522 // fixed 523 op.forEachOutput(allocFixedRegisterProcedure); 524 op.forEachTemp(allocFixedRegisterProcedure); 525 op.forEachAlive(allocFixedRegisterProcedure); 526 // variable 527 op.forEachOutput(allocRegisterProcedure); 528 op.forEachTemp(allocRegisterProcedure); 529 op.forEachAlive(allocRegisterProcedure); 530 /* state do never require a register */ 531 // op.forEachState(allocRegisterProcedure); 532 533 // should have 534 op.forEachTemp(allocStackOrRegisterProcedure); 535 op.forEachOutput(allocStackOrRegisterProcedure); 536 if (op instanceof LabelOp) { 537 processIncoming(block, op); 538 } 539 } 540 try (Indent indent1 = Debug.logAndIndent("input pos")) { 541 542 currentOpId++; 543 544 // fixed 545 op.forEachInput(allocFixedRegisterProcedure); 546 // variable 547 op.forEachInput(allocRegisterProcedure); 548 549 
op.forEachAlive(allocStackOrRegisterProcedure); 550 if (op instanceof BlockEndOp) { 551 processOutgoing(block, op); 552 } 553 op.forEachState(allocStackOrRegisterProcedure); 554 op.forEachInput(allocStackOrRegisterProcedure); 555 } 556 557 // insert spill/load instructions 558 insertInstructions(); 559 currentOpId++; 560 } 561 } 562 563 private void processIncoming(AbstractBlockBase<?> block, LIRInstruction instruction) { 564 int[] vars = livenessInfo.getBlockIn(block); 565 Value[] locs = new Value[vars.length]; 566 for (int i = 0; i < vars.length; i++) { 567 int varNum = vars[i]; 568 if (varNum >= 0) { 569 locs[i] = allocStackOrRegister(instruction, livenessInfo.getVariable(varNum), OperandMode.DEF, LabelOp.incomingFlags); 570 } 571 } 572 livenessInfo.setInLocations(block, locs); 573 } 574 575 private void processOutgoing(AbstractBlockBase<?> block, LIRInstruction instruction) { 576 int[] vars = livenessInfo.getBlockOut(block); 577 Value[] locs = new Value[vars.length]; 578 for (int i = 0; i < vars.length; i++) { 579 locs[i] = allocStackOrRegister(instruction, livenessInfo.getVariable(vars[i]), OperandMode.ALIVE, JumpOp.outgoingFlags); 580 } 581 livenessInfo.setOutLocations(block, locs); 582 } 583 584 private void spillCallerSavedRegisters() { 585 for (Register reg : callerSaveRegs) { 586 if (attributes(reg).isAllocatable()) { 587 evacuateRegisterAndSpill(reg); 588 assert checkRegisterUsage(reg); 589 setLastRegisterUsage(reg, currentOpId); 590 } 591 } 592 if (Debug.isLogEnabled()) { 593 Debug.log("operation destroys all caller-save registers"); 594 } 595 } 596 597 private final InstructionValueProcedure allocFixedRegisterProcedure = new InstructionValueProcedure() { 598 @Override 599 public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 600 return allocFixedRegister(value); 601 } 602 }; 603 private final InstructionValueProcedure allocRegisterProcedure = new InstructionValueProcedure() { 604 @Override 605 
public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) { 606 return allocRegister(instruction, value, mode, flags); 607 } 608 }; |