/// Copyright: Copyright (c) 2018-2019 Andrey Penechko.
/// License: $(WEB boost.org/LICENSE_1_0.txt, Boost License 1.0).
/// Authors: Andrey Penechko.

/// Given LIR, produces a set of live intervals and a "live in" bitmap.
/// Implementation of:
/// "Linear Scan Register Allocation on SSA Form"
// TODO: backward propagation of hints
module vox.be.reg_alloc.liveness_analysis;

import vox.all;

/*
// buildIntervals
for each block b in reverse order do
	live = union of successor.liveIn for each successor of b

	for each phi function phi of successors of b do
		live.add(phi.inputOf(b))

	for each opd in live do
		intervals[opd].addRange(b.from, b.to)

	for each operation op of b in reverse order do
		for each output operand opd of op do
			intervals[opd].setFrom(op.id)
			live.remove(opd)
		for each input operand opd of op do
			intervals[opd].addRange(b.from, op.id)
			live.add(opd)
		// extension
		if op requires two operand form and op is not commutative
			we need to extend range of right-most opd by 1

	for each phi function phi of b do
		live.remove(phi.output)

	if b is loop header then
		loopEnd = last block of the loop starting at b
		for each opd in live do
			intervals[opd].addRange(b.from, loopEnd.to)

	b.liveIn = live
*/
//version = LivePrint;
void pass_live_intervals(CompilationContext* context, ModuleDeclNode* mod, FunctionDeclNode* fun, LivenessInfo* liveness)
{
	if (fun.isExternal) return;

	IrFunction* lirData = context.getAst!IrFunction(fun.backendData.lirData);
	lirData.orderBlocks;
	lirData.assignSequentialBlockIndices;
	//dumpFunction(context, lirData, "Live");
	pass_live_intervals_func(context, lirData, liveness);

	if (context.printLiveIntervals && context.printDumpOf(fun))
	{
		liveness.dump(context, lirData);

		FuncDumpSettings set;
		set.printLiveness = true;
		set.printVregLiveness = true;
		set.printPregLiveness = true;
		set.printLivenessLinearIndex = true;
		set.printBlockFlags = true;

		IrDumpContext dumpCtx = {
			context : context,
			ir : lirData,
			settings : &set,
			liveness : liveness,
			passName : "Live"
		};
		dumpFunction(&dumpCtx);
	}
}

void pass_live_intervals_func(CompilationContext* context, IrFunction* ir, LivenessInfo* liveness)
{
	liveness.initStorage(context, ir);
	liveness.assignSequentialIndices(context, ir);
	liveness.setIntervalUsesLength(context, ir);

	void liveAdd(IrIndex someOperand)
	{
		context.assertf(someOperand.isVirtReg, "not vreg, but %s", someOperand.kind);
		version(LivePrint) writefln("[LIVE] liveAdd %s #%s", someOperand, someOperand.storageUintIndex);
		liveness.bitmap.liveBuckets.setBitAt(someOperand.storageUintIndex);
	}

	void liveRemove(IrIndex someOperand)
	{
		context.assertf(someOperand.isVirtReg, "not vreg, but %s", someOperand.kind);
		version(LivePrint) writefln("[LIVE] liveRemove %s #%s", someOperand, someOperand.storageUintIndex);
		liveness.bitmap.liveBuckets.resetBitAt(someOperand.storageUintIndex);
	}

	// algorithm start
	// for each block b in reverse order do
	foreach (IrIndex blockIndex, ref IrBasicBlock block; ir.blocksReverse)
	{
		// Block start position; this is also where phi functions are located
		uint blockFromPos = liveness.linearIndices.basicBlock(blockIndex);
		uint blockToPos = liveness.linearIndices.instr(block.lastInstr);
		version(LivePrint) writefln("[LIVE] % 3s %s", blockFromPos, blockIndex);
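
		// Note on positions (assumes ENUM_STEP == 2): linear indices are assigned sequentially,
		// and "+ENUM_STEP" below means "the next instruction slot", so the literal +2 offsets in
		// this loop reach the position just past a block's last instruction.
		// All ranges are half-open: [from; to).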

		// live = union of successor.liveIn for each successor of block
		liveness.bitmap.liveBuckets[] = 0;
		foreach (IrIndex succIndex; block.successors.range(ir))
		{
			foreach (size_t i, size_t bucket; liveness.bitmap.blockLiveInBuckets(succIndex))
				liveness.bitmap.liveBuckets[i] |= bucket;
		}

		// for each phi function phi of successors of block do
		// live.add(phi.inputOf(block))
		foreach (IrIndex succIndex; block.successors.range(ir))
		{
			foreach (IrIndex phiIndex, ref IrPhi phi; ir.getBlock(succIndex).phis(ir))
			{
				IrIndex[] phiPreds = ir.getBlock(phi.blockIndex).predecessors.data(ir);
				foreach (i, ref IrIndex phiArg; phi.args(ir))
				{
					if (phiPreds[i] == blockIndex)
					{
						if (phiArg.isVirtReg)
						{
							liveAdd(phiArg);
							LiveInterval* it = liveness.vint(phiArg);
							it.prependUse(UsePosition(blockToPos+2, UseKind.phi));
						}
					}
				}
			}
		}

		//writef("in %s %s live:", blockIndex, liveness.linearIndices.basicBlock(blockIndex));
		//foreach (size_t index; liveness.bitmap.live.bitsSet)
		//	writef(" %s", index);
		//writeln;

		// for each opd in live do
		foreach (size_t index; liveness.bitmap.liveBuckets.bitsSet)
		{
			// intervals[opd].addRange(block.from, block.to)
			liveness.vint(index).addRange(context, blockFromPos, blockToPos+2);
			version(LivePrint) writefln("[LIVE] addRange vreg.#%s [%s; %s)", index,
				blockFromPos, blockToPos);
		}

		// for each operation op of b in reverse order do
		foreach(IrIndex instrIndex, ref IrInstrHeader instrHeader; block.instructionsReverse(ir))
		{
			IrIndex result = instrHeader.tryGetResult(ir); // nullable
			IrIndex[] args = instrHeader.args(ir);

			uint linearInstrIndex = liveness.linearIndices.instr(instrIndex);
			version(LivePrint) writefln("[LIVE] % 3s %s", linearInstrIndex, instrIndex);

			InstrInfo instrInfo = context.machineInfo.instrInfo[instrHeader.op];
			//writefln("isMov %s %s", cast(Amd64Opcode)instrHeader.op, instrInfo.isMov);
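
			// Hint example (illustrative; the register and vreg names are made up): for a move
			// `eax = v.20` or `v.20 = eax` the interval of v.20 gets storageHint = eax, so the
			// allocator can later try to put v.20 into eax and turn the mov into a no-op.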
			// -------------- Assign interval hints --------------
			if (instrInfo.isMov) {
				IrIndex from = args[0];
				IrIndex to = result;
				if (from.isPhysReg && to.isVirtReg) {
					liveness.vint(to).storageHint = from;
				} else if (from.isVirtReg && to.isPhysReg) {
					liveness.vint(from).storageHint = to;
				}
			}

			// for each output operand opd of op do
			if (instrHeader.hasResult) {
				if (result.isVirtReg) {
					liveness.get(result).setFrom(context, linearInstrIndex);
					LiveInterval* it = liveness.vint(result);
					it.regClass = ir.getValueType(context, it.definition).isTypeFloat ? 1 : 0;
					it.prependUse(UsePosition(linearInstrIndex, UseKind.instruction));
					liveRemove(result);
				} else if (result.isPhysReg && !instrInfo.isMov) {
					uint physRegResultOffset = linearInstrIndex+ENUM_STEP;
					// needed to account for sub/add rsp instructions around call
					if (instrHeader.extendFixedResultRange)
						physRegResultOffset += ENUM_STEP;
					// non-mov, extend fixed interval to the next instr (which must be mov from that phys reg)
					liveness.pint(result).addRange(context, linearInstrIndex, physRegResultOffset);
				}
			}

			// HANDLE IMPLICIT ARGS
			// if non-mov instruction assigns to phys register,
			// movs must follow instruction immediately matching the order of results
			// if non-mov instruction accepts 1 or more phys registers, then
			// it must be preceded by movs from vregs to pregs in matching order
			// Example:
			//   eax = v.20 // args[0]
			//   ecx = v.45 // args[2]
			//   optional non-mov instruction (for call, which has extendFixedArgRange)
			//   edx, ecx = some_instr(eax, v.100, ecx) // edx is results[0]
			//   optional non-mov instruction (for call, which has extendFixedResultRange)
			//   v.200 = edx // results[0] (aka result)
			//   v.300 = ecx // results[1]
			uint physRegArgsOffset = 0;
			foreach(IrIndex arg; args) {
				if (arg.isVirtReg) {
					LiveInterval* it = liveness.vint(arg);
					it.addRange(context, blockFromPos, linearInstrIndex);
					it.prependUse(UsePosition(linearInstrIndex, UseKind.instruction));
					liveAdd(arg);
				}
				else if (arg.isPhysReg) {
					physRegArgsOffset += ENUM_STEP;
				}
			}

			if (physRegArgsOffset != 0)
			{
				// needed to account for sub/add rsp instructions around call
				if (instrHeader.extendFixedArgRange)
					physRegArgsOffset += ENUM_STEP;
			}

			// extension
			// if op requires two operand form and op is not commutative and arg0 != arg1
			// we need to extend range of right-most opd by 1
			// see more info in register allocator
			if (instrInfo.isResultInDst && instrHeader.numArgs == 2 && !instrInfo.isCommutative)
			{
				IrIndex arg0 = args[0];
				IrIndex arg1 = args[1];
				if ( !sameIndexOrPhysReg(arg0, arg1) ) {
					if (arg1.isVirtReg || arg1.isPhysReg) {
						liveness.get(arg1).addRange(context, blockFromPos, linearInstrIndex+1);
					}
				}
			}

			if (!instrInfo.isMov)
			{
				foreach(IrIndex arg; args)
				{
					if (arg.isPhysReg)
					{
						// non-mov, extend fixed interval to the preceding mov instr
						liveness.pint(arg).addRange(context, linearInstrIndex - physRegArgsOffset, linearInstrIndex);
						physRegArgsOffset -= ENUM_STEP;
					}
				}
			}

			// add fixed intervals for function calls
			if (instrInfo.isCall)
			{
				IrIndex callee = args[0];
				CallConv* cc = context.types.getCalleeCallConv(callee, ir, context);
				FullRegSet volatileRegs = cc.volatileRegs;
				foreach(PhysReg reg; volatileRegs) {
					liveness.pint(reg).addRange(context, linearInstrIndex, linearInstrIndex+1);
				}
			}
		}

		// for each phi function phi of b do
		foreach(IrIndex phiIndex, ref IrPhi phi; block.phis(ir))
		{
			// live.remove(phi.output)
			if (phi.result.isVirtReg) {
				liveRemove(phi.result);
				liveness.get(phi.result).setFrom(context, blockFromPos);
				LiveInterval* it = liveness.vint(phi.result);
				it.regClass = ir.getValueType(context, it.definition).isTypeFloat ? 1 : 0;
			}
		}
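
		// Loop extension (sketch of the rationale): a value that is live at a loop header is
		// needed again on every back-edge iteration, so below its range is stretched over the
		// whole loop and the header's live-in set is merged into every block of the loop.
		// E.g. a counter defined before the loop and tested in the header stays live through
		// blocks of the loop body that never mention it.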
		// if b is loop header then
		if (block.isLoopHeader)
		{
			// We need to find the loop block with the max position
			// Use loop header as starting block in case it is in max position
			uint maxPos = blockToPos+2;
			IrIndex loopEnd = blockIndex;
			// loopEnd = last block of the loop starting at b
			foreach(IrIndex pred; block.predecessors.range(ir)) {
				uint blockEndPos = liveness.linearIndices.instr(ir.getBlock(pred).lastInstr)+2;
				if (blockEndPos > maxPos) {
					maxPos = blockEndPos;
					loopEnd = pred;
				}
			}

			if (loopEnd != blockIndex) // skip if header jumps to itself
			for (IrIndex loopBlockIndex = block.nextBlock;;)
			{
				IrBasicBlock* loopBlock = ir.getBlock(loopBlockIndex);
				size_t[] liveIns = liveness.bitmap.blockLiveInBuckets(loopBlockIndex);

				// add live in of loop header to all blocks of the loop
				liveIns[] |= liveness.bitmap.liveBuckets[];

				if (loopBlockIndex == loopEnd) break;
				loopBlockIndex = loopBlock.nextBlock;
			}

			// for each opd in live do
			// intervals[opd].addRange(b.from, loopEnd.to)
			foreach (size_t index; liveness.bitmap.liveBuckets.bitsSet)
			{
				liveness.vint(index).addRange(context, blockFromPos, maxPos);
			}
		}

		// b.liveIn = live
		size_t[] liveIns = liveness.bitmap.blockLiveInBuckets(blockIndex);
		liveIns[] = liveness.bitmap.liveBuckets[];
	}

	// create ranges for parameter registers in start block
	foreach(IrIndex instrIndex, ref IrInstrHeader instrHeader; ir.getBlock(ir.entryBasicBlock).instructions(ir))
	{
		InstrInfo instrInfo = context.machineInfo.instrInfo[instrHeader.op];
		if (instrInfo.isMov) {
			IrIndex from = instrHeader.arg(ir, 0);
			if (from.isPhysReg) {
				uint linearInstrIndex = liveness.linearIndices.instr(instrIndex);
				liveness.pint(from).addRange(context, 1, linearInstrIndex);
			}
			IrIndex result = instrHeader.result(ir);
			if (result.isVirtReg && from.isSomeReg)
			{
				liveness.vint(result).storageHint = from;
			}
		}
	}

	// Reset length from 0 to actual length
	liveness.resetIntervalUsesLength(context, ir);

	//if (context.validateIr)
	//foreach(interval; liveness.intervals) {
	//	LiveRange prev;
	//	foreach(i, range; interval.ranges) {
	//		context.assertf(range.from < range.to, "Invalid range %s [%s, %s)", i, range.from, range.to);
	//		if (i == 0) continue;
	//		context.assertf(prev.to < range.from, "Ranges are not sequential %s [%s, %s) %s [%s, %s)", i-1, prev.from, prev.to, i, range.from, range.to);
	//	}
	//}
}