1 /**
2 Copyright: Copyright (c) 2017-2019 Andrey Penechko.
3 License: $(WEB boost.org/LICENSE_1_0.txt, Boost License 1.0).
4 Authors: Andrey Penechko.
5 */
6 /// IR Builder. IR creation API
7 module vox.ir.ir_builder;
8 
9 import std.stdio;
10 import std.string : format;
11 
12 import vox.all;
13 import vox.ir.ir_index;
14 
15 //version = IrPrint;
16 
/// Pair returned by the emitInstr family for instructions that may produce a result.
struct InstrWithResult
{
	/// Index of the created instruction
	IrIndex instruction;
	/// Index of the result value; undefined when the instruction has no result
	IrIndex result;
}
22 
/// Controls behavior of emitInstr functions
struct ExtraInstrArgs
{
	/// Gets copied into InstrHeader.op when IrInstrFlags.isGeneric is set
	IrOpcode opcode;

	/// Gets copied into InstrHeader.cond when IrInstrFlags.hasCondition is set
	ubyte cond;

	/// Always gets copied into InstrHeader.argSize
	IrArgSize argSize;

	/// When true newly created instruction is added as a user of each argument
	bool addUsers = true;

	/// Is checked when instruction has variadic result (IrInstrFlags.hasVariadicResult)
	/// If `hasResult` is false, no result is allocated and `result` value is ignored
	/// If `hasResult` is true, then `result` is checked:
	///    If `result` is defined:
	///       then instrHeader.result is set to its value
	///       or else a new virtual register is created
	bool hasResult;

	/// Will be added to the number of allocated argument slots and to number of arguments
	ubyte extraArgSlots;

	/// If instruction has variadic result, see docs on 'hasResult'
	/// If instruction always has result, then 'result' is used when defined
	///    when not defined, new virtual register is created
	/// If instruction always has no result, 'result' value is ignored
	IrIndex result;

	/// When instruction has virtual register as result, result.type is set to 'type'
	/// Not used if 'result' is present
	IrIndex type;
}
59 
/// Forward-reference target used while building control flow; see addJumpToLabel / forceAllocLabelBlock.
struct IrLabel
{
	/// If isAllocated
	///   blockIndex points to new block
	/// else
	///   If numPredecessors == 0, blockIndex points to currentBlock at
	///     scope start
	///   If numPredecessors == 1, blockIndex points to first predecessor
	/// If numPredecessors > 1, blockIndex points to a new block and isAllocated must be true
	IrIndex blockIndex;
	/// True once a dedicated basic block was created for this label
	bool isAllocated;
	/// Number of jumps targeting this label so far
	uint numPredecessors;
}
75 
/// Key of `IrBuilder.blockVarDef`: identifies the current SSA value of `var` within `blockId`.
struct BlockVarPair
{
	IrIndex blockId;
	IrIndex var;

	/// Sink-based formatting; prints as "(blockId var)"
	void toString()(scope void delegate(const(char)[]) sink) const {
		import std.format : formattedWrite;
		sink.formattedWrite("(%s %s)", blockId, var);
	}
}
86 
87 
88 // papers:
89 // 1. Simple and Efficient Construction of Static Single Assignment Form
90 struct IrBuilder
91 {
92 	CompilationContext* context;
93 	IrFunction* ir;
94 
95 	// Stores current definition of variable per block during SSA-form IR construction.
96 	private HashMap!(BlockVarPair, IrIndex, BlockVarPair.init) blockVarDef;
97 
98 	private uint nextIrVarIndex;
99 
100 	private IrIndex returnVar;
101 	private uint numRemovedVregs;
102 
	/// Returns `blockVarDef` storage to the arena. Call when the builder is no longer needed.
	void free() {
		blockVarDef.free(context.arrayArena);
	}
106 
	/// Points all per-function IR storage pointers at the current end of the shared buffers,
	/// so entities appended from now on belong to `ir`.
	private void setPointers(IrFunction* ir, CompilationContext* context)
	{
		this.context = context;
		this.ir = ir;

		ir.instrPtr = context.irStorage.instrHeaderBuffer.nextPtr;
		ir.instrPayloadPtr = context.irStorage.instrPayloadBuffer.nextPtr;
		ir.instrNextPtr = context.irStorage.instrNextBuffer.nextPtr;
		ir.instrPrevPtr = context.irStorage.instrPrevBuffer.nextPtr;
		ir.phiPtr = context.irStorage.phiBuffer.nextPtr;
		ir.arrayPtr = context.irStorage.arrayBuffer.nextPtr;
		ir.vregPtr = context.irStorage.vregBuffer.nextPtr;
		ir.basicBlockPtr = context.irStorage.basicBlockBuffer.nextPtr;
		ir.stackSlotPtr = context.irStorage.stackSlotBuffer.nextPtr;
	}
122 
123 	private void reset() {
124 		blockVarDef.clear();
125 		numRemovedVregs = 0;
126 		returnVar = IrIndex.init;
127 	}
128 
	/// Must be called before compilation of each function. Allows reusing temp buffers.
	/// Sets up entry and exit basic blocks.
	/// Pre-fills the exit block with the appropriate terminator based on the return type:
	/// `unreachable` for noreturn, `ret` for void, `ret_val` (reading `returnVar`) otherwise.
	void begin(IrFunction* ir, CompilationContext* context)
	{
		setPointers(ir, context);
		reset();

		setupEntryExitBlocks();

		IrIndex returnType = context.types.getReturnType(ir.type, context);
		if (returnType.isTypeNoreturn)
		{
			addUnreachable(ir.exitBasicBlock);
		}
		else if (returnType.isTypeVoid)
		{
			emitInstr!(IrOpcode.ret)(ir.exitBasicBlock);
		}
		else
		{
			// addReturn writes into returnVar; readVariable here creates the phi chain lazily
			returnVar = newIrVarIndex(returnType);
			IrIndex retValue = readVariable(ir.exitBasicBlock, returnVar);
			emitInstr!(IrOpcode.ret_val)(ir.exitBasicBlock, retValue);
		}
		ir.getBlock(ir.exitBasicBlock).isFinished = true;
	}
155 
	/// Must be called before IR to LIR pass
	/// NOTE(review): `oldIr` is currently unused here; kept for interface stability — confirm callers
	void beginLir(IrFunction* ir, IrFunction* oldIr, CompilationContext* c)
	{
		setPointers(ir, c);
		reset();
	}
162 
	/// Copies ir data to the end of IR buffer, to allow for modification
	/// Unlike `begin`/`beginLir`, does not reset storage pointers — `dupIrStorage` relocates them.
	void beginDup(IrFunction* ir, CompilationContext* context) {
		this.context = context;
		this.ir = ir;

		dupIrStorage(ir, context);
		reset();
	}
171 
	/// Performs GC of removed entities.
	/// Compacts the virtual register array by moving live registers from the end
	/// into the slots of removed ones, then shrinks the buffer. Users and
	/// definitions of moved registers are redirected (see moveVreg).
	void finalizeIr() {
		version(IrPrint) writefln("[IR] finalizeIr removed %s vreg", numRemovedVregs);

		// --------------- GC REMOVED VREGS ---------------
		// two cursors converging from both ends of the vreg array
		IrIndex lastUsedReg = ir.lastVirtReg;
		IrIndex firstRemovedReg = ir.firstVirtReg;

		// if zero registers exists, this must not be called
		// if only removed registers exist, then `lastUsedReg` becomes null
		// otherwise it's last used register
		void updateLastUsedReg() {
			if (lastUsedReg.isUndefined) return; // already no used regs left

			while(ir.getVirtReg(lastUsedReg).isRemoved)
			{
				if (lastUsedReg.storageUintIndex == 0) {
					// we reached the start of array of vregs. No used regs can be found anymore
					lastUsedReg = IrIndex();
					return;
				}
				// get prev register
				lastUsedReg.storageUintIndex = lastUsedReg.storageUintIndex - 1;
			}
			// `lastUsedReg` is not removed
		}

		// called once per number of removed regs
		void updateFirstRemovedReg() {
			while(!ir.getVirtReg(firstRemovedReg).isRemoved)
			{
				// get next register
				firstRemovedReg.storageUintIndex = firstRemovedReg.storageUintIndex + 1;
			}
		}

		uint numProcessedVregs = 0;

		// max loop iterations == min(numUsedRegs, numRemovedVregs)
		// Actual time complexity is O(numVirtualRegisters)
		// 0 times in best case, were all removed regs are already at the end
		while (numProcessedVregs < numRemovedVregs)
		{
			updateLastUsedReg;

			// no used regs left
			if (lastUsedReg.isUndefined) break;

			updateFirstRemovedReg;

			// all removed regs are already at the end
			if (firstRemovedReg.storageUintIndex > lastUsedReg.storageUintIndex) break;

			// move last used reg into the place of first removed register
			moveVreg(lastUsedReg, firstRemovedReg);
			// mark as removed for updateLastUsedReg
			// (same self-referencing-type convention as removeVirtualRegister)
			ir.getVirtReg(lastUsedReg).type = lastUsedReg;

			++numProcessedVregs;
		}

		// all removed regs were moved to the end of array
		ir.numVirtualRegisters -= numRemovedVregs;
		//writefln("remove %s %s, %s", ir.numVirtualRegisters, context.irStorage.vregBuffer.length, numRemovedVregs);
		context.irStorage.vregBuffer.unput(numRemovedVregs);
	}
238 
	/// Creates the two canonical blocks: entry (index 0) and exit (index 1),
	/// links them, and seals the entry block (it can never gain predecessors).
	void setupEntryExitBlocks()
	{
		assert(ir.numBasicBlocks == 0);
		// Canonical function CFG has entry block, and single exit block.
		appendBasicBlockSlot; // entry block at index 0
		appendBasicBlockSlot; // exit block at index 1

		ir.getBlock(ir.entryBasicBlock).nextBlock = ir.exitBasicBlock;
		sealBlock(ir.entryBasicBlock);
		ir.getBlock(ir.exitBasicBlock).prevBlock = ir.entryBasicBlock;
	}
250 
	// memory is not initialized
	/// Reserves `numSlots` instruction slots (header + next/prev links) for this function.
	void appendInstructionSlots(uint numSlots) {
		ir.numInstructions += numSlots;
		context.irStorage.instrHeaderBuffer.voidPut(numSlots);
		context.irStorage.instrNextBuffer.voidPut(numSlots);
		context.irStorage.instrPrevBuffer.voidPut(numSlots);
	}
258 
	// memory is not initialized
	// slots for instruction result and arguments
	void appendPayloadSlots(uint numSlots) {
		ir.numPayloadSlots += numSlots;
		context.irStorage.instrPayloadBuffer.voidPut(numSlots);
	}
265 
266 	// memory is initialized
267 	IrIndex appendPhiSlot() {
268 		IrIndex result = IrIndex(ir.numPhis, IrValueKind.phi);
269 		ir.numPhis += 1;
270 		context.irStorage.phiBuffer.put(IrPhi());
271 		return result;
272 	}
273 
274 	// memory is not initialized
275 	IrIndex appendVirtRegSlot() {
276 		IrIndex result = IrIndex(ir.numVirtualRegisters, IrValueKind.virtualRegister);
277 		ir.numVirtualRegisters += 1;
278 		context.irStorage.vregBuffer.voidPut(1);
279 		//writefln("add %s %s", ir.numVirtualRegisters, context.irStorage.vregBuffer.length);
280 		return result;
281 	}
282 
283 	// memory is initialized
284 	IrIndex appendBasicBlockSlot() {
285 		IrIndex result = IrIndex(ir.numBasicBlocks, IrValueKind.basicBlock);
286 		ir.numBasicBlocks += 1;
287 		context.irStorage.basicBlockBuffer.put(IrBasicBlock());
288 		return result;
289 	}
290 
	// memory is initialized
	/// Creates a stack slot of given size/alignment and kind.
	/// The slot's type is stored as a pointer to `type`.
	IrIndex appendStackSlot(IrIndex type, SizeAndAlignment sizealign, StackSlotKind kind) {
		IrIndex result = IrIndex(ir.numStackSlots, IrValueKind.stackSlot);
		ir.numStackSlots += 1;
		StackSlot slot = StackSlot(sizealign, kind);
		slot.type = context.types.appendPtr(type);

		context.assertf(slot.sizealign.size % slot.sizealign.alignment == 0, "size is not multiple of alignment (%s)", slot.sizealign);
		context.assertf(slot.sizealign.alignmentPower <= 4, "Big alignments (> 16) aren't implemented");

		context.irStorage.stackSlotBuffer.put(slot);
		return result;
	}
304 
305 	IrIndex allocateIrArray(uint capacity)
306 	{
307 		IrIndex result = IrIndex(ir.arrayLength, IrValueKind.array);
308 
309 		context.irStorage.arrayBuffer.voidPut(capacity);
310 		ir.arrayLength += capacity;
311 
312 		return result;
313 	}
314 
315 	// Returns true if array can be extended in-place. If successful double the capacity
316 	bool tryExtendArray(IrIndex offset, uint capacity)
317 	{
318 		if (offset.storageUintIndex + capacity == ir.arrayLength)
319 		{
320 			context.irStorage.arrayBuffer.voidPut(capacity);
321 			ir.arrayLength += capacity;
322 			return true;
323 		}
324 		return false;
325 	}
326 
327 	/// Adds control-flow edge pointing `fromBlock` -> `toBlock`.
328 	void addBlockTarget(IrIndex fromBasicBlockIndex, IrIndex toBasicBlockIndex) {
329 		ir.getBlock(fromBasicBlockIndex).successors.append(&this, toBasicBlockIndex);
330 		ir.getBlock(toBasicBlockIndex).predecessors.append(&this, fromBasicBlockIndex);
331 		context.assertf(!ir.getBlock(toBasicBlockIndex).isSealed, "Cannot add block target %s -> %s. %s is sealed",
332 			fromBasicBlockIndex, toBasicBlockIndex, toBasicBlockIndex);
333 	}
334 
335 	/// Creates new block and inserts it after lastBasicBlock and sets lastBasicBlock
336 	IrIndex addBasicBlock() {
337 		assert(ir.lastBasicBlock.isDefined);
338 		IrIndex lastBasicBlock = ir.lastBasicBlock;
339 		IrIndex newBlock = appendBasicBlockSlot;
340 		ir.getBlock(newBlock).nextBlock = ir.exitBasicBlock;
341 		ir.getBlock(newBlock).prevBlock = lastBasicBlock;
342 		ir.getBlock(ir.exitBasicBlock).prevBlock = newBlock;
343 		ir.getBlock(lastBasicBlock).nextBlock = newBlock;
344 		return newBlock;
345 	}
346 
	// Algorithm 4: Handling incomplete CFGs
	/// Basic block is sealed if no further predecessors will be added to the block.
	/// Sealed block is not necessarily filled.
	/// Ignores already sealed blocks.
	/// `force` ignores IrBasicBlock.preventSeal
	void sealBlock(IrIndex basicBlockToSeal, bool force = false) {
		//dumpFunction(context, ir, "IR gen(Seal block)");
		version(IrPrint) writefln("[IR] seal %s", basicBlockToSeal);

		IrBasicBlock* bb = ir.getBlock(basicBlockToSeal);
		if (bb.isSealed) return;
		if (bb.preventSeal && !force) return;

		// all phis added to this block are incomplete and need to get their arguments
		foreach(IrIndex phiIndex, ref IrPhi phi; bb.phis(ir)) {
			addPhiOperands(basicBlockToSeal, phi.var, phiIndex);
		}

		bb.isSealed = true;
	}
367 
	/// Allocates new variable id for this function. It should be bound to a variable
	/// and used with writeVariable, readVariable functions
	IrIndex newIrVarIndex(IrIndex varType) {
		IrIndex varId = context.appendTemp!IrVariableInfo;
		context.getTemp!IrVariableInfo(varId).type = varType;
		return varId;
	}
375 
	/// Returns the type that `newIrVarIndex` stored for variable `varId`.
	private IrIndex getVarType(IrIndex varId) {
		return context.getTemp!IrVariableInfo(varId).type;
	}
379 
	// Algorithm 1: Implementation of local value numbering
	/// Redefines `variable` with `value`. Is used for assignment to variable
	/// `value` must be one of the kinds listed in the assert below.
	void writeVariable(IrIndex blockIndex, IrIndex var, IrIndex value) {
		context.assertf(var.kind == IrValueKind.variable, "Variable kind is %s", var.kind);
		context.assertf(
			value.kind == IrValueKind.func ||
			value.kind == IrValueKind.constant ||
			value.kind == IrValueKind.constantAggregate ||
			value.kind == IrValueKind.constantZero ||
			value.kind == IrValueKind.global ||
			value.kind == IrValueKind.virtualRegister ||
			value.kind == IrValueKind.physicalRegister ||
			value.kind == IrValueKind.stackSlot,
			"writeVariable(block %s, variable %s, value %s)",
			blockIndex, var, value);

		version(IrPrint) writefln("[IR]  blockVarDef[%s %s] <- %s", blockIndex, var, value);
		blockVarDef.put(context.arrayArena, BlockVarPair(blockIndex, var), value);
	}
399 
	/// Returns the value that currently defines `var` within `blockIndex`
	/// Falls back to the recursive SSA lookup (may create phis) on cache miss.
	IrIndex readVariable(IrIndex blockIndex, IrIndex var)
	//out (r) {
	//	writefln("readVariable %s %s %s", r, blockIndex, var);
	//}
	//do
	{
		context.assertf(var.kind == IrValueKind.variable, "Variable kind is %s", var.kind);
		if (auto irRef = BlockVarPair(blockIndex, var) in blockVarDef)
			return *irRef;
		return readVariableRecursive(blockIndex, var);
	}
412 
	/// Puts `user` into a list of users of `used` value
	/// Only globals and virtual registers track users; most other kinds are
	/// accepted as no-ops, and kinds that must never be used directly assert.
	void addUser(IrIndex user, IrIndex used) {
		//if (!used.isDefined) dumpFunction(context, ir, "IR gen(addUser)");
		//writefln("addUser %s %s", used, user);
		context.assertf(user.isDefined && used.isDefined, "%s addUser(%s, %s)",
			context.idString(ir.name), user, used);
		final switch (used.kind) with(IrValueKind) {
			case none: context.internal_error("addUser %s %s", user, used);
			case array: assert(false, "addUser array");
			case instruction: assert(false, "addUser instruction");
			case basicBlock: break; // allowed. As argument of jmp jcc
			case constant: break; // allowed, noop
			case constantAggregate: break;
			case constantZero: break;
			case global:
				context.globals.get(used).addUser(user);
				break;
			case phi: assert(false, "addUser phi"); // must be virt reg instead
			case stackSlot: break; // allowed, noop
			case virtualRegister:
				ir.getVirtReg(used).users.put(&this, user);
				break;
			case physicalRegister: break; // allowed, noop
			case type: break; // allowed, noop (no user tracking)
			case variable: assert(false, "addUser variable");
			case func: break; // allowed, noop (no user tracking)
		}
	}
441 
	/// Returns InstrWithResult (if instr has result) or IrIndex instruction otherwise
	/// Always returns InstrWithResult when instruction has variadic result
	///   in this case result can be null if no result is requested
	/// Inserts instruction at the end of blockIndex
	/// See: ExtraInstrArgs
	auto emitInstr(alias I)(IrIndex blockIndex, IrIndex[] args ...)
	{
		// TODO assert if I requires ExtraInstrArgs data
		return emitInstr!I(blockIndex, ExtraInstrArgs(), args);
	}
452 
	/// ditto
	/// Creates the instruction, then appends it to the end of `blockIndex`.
	auto emitInstr(alias I)(IrIndex blockIndex, ExtraInstrArgs extra, IrIndex[] args ...)
	{
		static if (getInstrInfo!I.mayHaveResult) {
			InstrWithResult result = emitInstr!I(extra, args);
			appendBlockInstr(blockIndex, result.instruction);
			return result;
		} else {
			IrIndex result = emitInstr!I(extra, args);
			appendBlockInstr(blockIndex, result);
			return result;
		}
	}
466 
	/// ditto, but inserts before instruction
	auto emitInstrBefore(alias I)(IrIndex instrBefore, ExtraInstrArgs extra, IrIndex[] args ...)
	{
		static if (getInstrInfo!I.mayHaveResult) {
			InstrWithResult result = emitInstr!I(extra, args);
			insertBeforeInstr(instrBefore, result.instruction);
			return result;
		} else {
			IrIndex result = emitInstr!I(extra, args);
			insertBeforeInstr(instrBefore, result);
			return result;
		}
	}
480 
	/// ditto, but inserts after instruction instead of block
	auto emitInstrAfter(alias I)(IrIndex instrAfter, ExtraInstrArgs extra, IrIndex[] args ...)
	{
		static if (getInstrInfo!I.mayHaveResult) {
			InstrWithResult result = emitInstr!I(extra, args);
			insertAfterInstr(instrAfter, result.instruction);
			return result;
		} else {
			IrIndex result = emitInstr!I(extra, args);
			insertAfterInstr(instrAfter, result);
			return result;
		}
	}
494 
	/// ditto
	/// Only creates instruction, doesn't add to basic block
	/// Payload layout: [optional result] [args] [extra arg slots] [hidden args].
	/// Result allocation is decided by the instruction's static flags and `extra`
	/// (see ExtraInstrArgs docs).
	auto emitInstr(alias I)(ExtraInstrArgs extra, IrIndex[] args ...)
	{
		IrIndex instr = IrIndex(ir.numInstructions, IrValueKind.instruction);
		appendInstructionSlots(1);

		IrInstrHeader* instrHeader = ir.getInstr(instr);
		*instrHeader = IrInstrHeader.init;

		enum iinfo = getInstrInfo!I;

		// opcode
		static if (iinfo.isGeneric)
			instrHeader.op = extra.opcode;
		else
			instrHeader.op = I;

		instrHeader.argSize = extra.argSize;

		// payload offset must point to the first argument
		instrHeader._payloadOffset = ir.numPayloadSlots;

		// result
		static if (iinfo.hasVariadicResult) {
			if (extra.hasResult) {
				appendPayloadSlots(1);
				instrHeader.hasResult = true;
			} else {
				instrHeader.hasResult = false;
			}
		} else static if (iinfo.hasResult) {
			appendPayloadSlots(1);
			instrHeader.hasResult = true;
		} else {
			instrHeader.hasResult = false;
		}

		// set result
		static if (iinfo.mayHaveResult)
		{
			if (instrHeader.hasResult)
			{
				// advance pointer to point to arguments
				++instrHeader._payloadOffset;

				if (extra.result.isDefined) {
					instrHeader.result(ir) = extra.result;
					// fix definition
					if (extra.result.isVirtReg) {
						IrVirtualRegister* virtReg = ir.getVirtReg(extra.result);
						virtReg.definition = instr;
					}
				} else {
					assert(extra.type.isType, format("Invalid extra.type (%s)", extra.type));
					instrHeader.result(ir) = addVirtualRegister(instr, extra.type);
				}
			}
		}

		// condition
		static if (iinfo.hasCondition) {
			instrHeader.cond = extra.cond;
		}

		ubyte numArgs = cast(typeof(instrHeader.numArgs))args.length;
		ubyte numArgSlots = cast(typeof(instrHeader.numArgs))(numArgs + extra.extraArgSlots);
		instrHeader.numArgs = numArgs;

		// arguments checks
		static if (iinfo.hasVariadicArgs)
		{
			context.assertf(args.length <= IrInstrHeader.numArgs.max,
				"Too many arguments (%s), max is %s",
				args.length,
				IrInstrHeader.numArgs.max);

			context.assertf(args.length >= iinfo.numArgs,
				"Instruction %s requires at least %s arguments, while passed %s",
				I.stringof,
				iinfo.numArgs,
				args.length);
		}
		else
		{
			context.assertf(iinfo.numArgs == args.length,
				"Instruction %s requires %s args, while passed %s",
				I.stringof, iinfo.numArgs, args.length);
		}

		// allocate argument slots and hidden args after optional result
		appendPayloadSlots(numArgSlots + iinfo.numHiddenArgs);

		// set arguments
		instrHeader.args(ir)[] = args;

		// Instruction uses its arguments
		if (extra.addUsers) {
			foreach(IrIndex arg; args) {
				addUser(instr, arg);
			}
		}

		// register extra slots. They are not considered above
		// (numArgs was temporarily set to the real arg count so args(ir)[] sliced correctly)
		instrHeader.numArgs = numArgSlots;

		static if (iinfo.mayHaveResult)
		{
			if (instrHeader.hasResult)
				return InstrWithResult(instr, instrHeader.result(ir));
			else
				return InstrWithResult(instr, IrIndex());
		} else {
			return instr;
		}
	}
611 
	/// Adds instruction to the end of basic block
	/// Doesn't set any instruction info except prevInstr, nextInstr index
	/// Note: the intrusive list is terminated by the block index itself on both ends.
	void appendBlockInstr(IrIndex blockIndex, IrIndex instr)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);

		// new tail: next pointer loops back to the block
		ir.nextInstr(instr) = blockIndex;

		if (block.firstInstr.isDefined) {
			// points to prev instruction
			ir.prevInstr(instr) = block.lastInstr;
			ir.nextInstr(block.lastInstr) = instr;
			block.lastInstr = instr;
		} else {
			// empty block: instr becomes both first and last
			ir.prevInstr(instr) = blockIndex;
			block.firstInstr = instr;
			block.lastInstr = instr;
		}
	}
631 
	/// Adds instruction to the start of basic block
	/// Doesn't set any instruction info except prevInstr, nextInstr index
	void prependBlockInstr(IrIndex blockIndex, IrIndex instr)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);

		// new head: prev pointer loops back to the block
		ir.prevInstr(instr) = blockIndex;

		if (block.lastInstr.isDefined) {
			// points to next instruction
			ir.nextInstr(instr) = block.firstInstr;
			ir.prevInstr(block.firstInstr) = instr;
			block.firstInstr = instr;
		} else {
			// empty block: instr becomes both first and last
			ir.nextInstr(instr) = blockIndex;
			block.lastInstr = instr;
			block.firstInstr = instr;
		}
	}
651 
	/// Inserts 'instr' after 'afterInstr'
	/// Updates the owning block's lastInstr when 'afterInstr' was the tail.
	void insertAfterInstr(IrIndex afterInstr, IrIndex instr)
	{
		ir.prevInstr(instr) = afterInstr;
		ir.nextInstr(instr) = ir.nextInstr(afterInstr);

		if (ir.nextInstr(afterInstr).isBasicBlock) {
			// 'afterInstr' is the last instr in the block
			ir.getBlock(ir.nextInstr(afterInstr)).lastInstr = instr;
		} else {
			// There must be instr after 'afterInstr'
			ir.prevInstr(ir.nextInstr(afterInstr)) = instr;
		}
		ir.nextInstr(afterInstr) = instr;
	}
667 
668 	/// Inserts 'instr' before lastInstr of basic block 'blockIndex'
669 	void insertBeforeLastInstr(IrIndex blockIndex, IrIndex instr)
670 	{
671 		IrBasicBlock* block = ir.getBlock(blockIndex);
672 		if (block.lastInstr.isDefined) {
673 			insertBeforeInstr(block.lastInstr, instr);
674 		} else {
675 			appendBlockInstr(blockIndex, instr);
676 		}
677 	}
678 
679 	/// Inserts 'instr' before 'beforeInstr'
680 	void insertBeforeInstr(IrIndex beforeInstr, IrIndex instr)
681 	{
682 		IrInstrHeader* beforeInstrHeader = ir.getInstr(beforeInstr);
683 
684 		ir.nextInstr(instr) = beforeInstr;
685 		ir.prevInstr(instr) = ir.prevInstr(beforeInstr);
686 
687 		if (ir.prevInstr(beforeInstr).isBasicBlock) {
688 			// 'beforeInstr' is the first instr in the block
689 			ir.getBlock(ir.prevInstr(beforeInstr)).firstInstr = instr;
690 		} else {
691 			// There must be instr before 'beforeInstr'
692 			ir.nextInstr(ir.prevInstr(beforeInstr)) = instr;
693 		}
694 
695 		ir.prevInstr(beforeInstr) = instr;
696 	}
697 
	/// Emits a two-operand conditional branch terminating `blockIndex`,
	/// allocating blocks for both labels and wiring the CFG edges.
	IrIndex addBinBranch(IrIndex blockIndex, IrBinaryCondition cond, IrArgSize argSize, IrIndex arg0, IrIndex arg1, ref IrLabel trueExit, ref IrLabel falseExit)
	{
		auto res = addBinBranch(blockIndex, cond, argSize, arg0, arg1);
		forceAllocLabelBlock(trueExit, 1);
		forceAllocLabelBlock(falseExit, 1);
		addBlockTarget(blockIndex, trueExit.blockIndex);
		addBlockTarget(blockIndex, falseExit.blockIndex);
		return res;
	}
707 
	/// Emits a two-operand conditional branch and marks `blockIndex` finished.
	/// Caller is responsible for adding block targets.
	IrIndex addBinBranch(IrIndex blockIndex, IrBinaryCondition cond, IrArgSize argSize, IrIndex arg0, IrIndex arg1)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);
		assert(!block.isFinished);
		block.isFinished = true;
		ExtraInstrArgs extra = { cond : cond, argSize : argSize };
		return emitInstr!(IrOpcode.branch_binary)(blockIndex, extra, arg0, arg1);
	}
716 
	/// Emits a one-operand conditional branch terminating `blockIndex`,
	/// allocating blocks for both labels and wiring the CFG edges.
	IrIndex addUnaryBranch(IrIndex blockIndex, IrUnaryCondition cond, IrArgSize argSize, IrIndex arg0, ref IrLabel trueExit, ref IrLabel falseExit)
	{
		auto res = addUnaryBranch(blockIndex, cond, argSize, arg0);
		forceAllocLabelBlock(trueExit, 1);
		forceAllocLabelBlock(falseExit, 1);
		addBlockTarget(blockIndex, trueExit.blockIndex);
		addBlockTarget(blockIndex, falseExit.blockIndex);
		return res;
	}
726 
	/// Emits a one-operand conditional branch and marks `blockIndex` finished.
	/// Caller is responsible for adding block targets.
	IrIndex addUnaryBranch(IrIndex blockIndex, IrUnaryCondition cond, IrArgSize argSize, IrIndex arg0)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);
		assert(!block.isFinished);
		block.isFinished = true;
		ExtraInstrArgs extra = { cond : cond, argSize : argSize };
		return emitInstr!(IrOpcode.branch_unary)(blockIndex, extra, arg0);
	}
735 
	/// Returns `returnValue` from the function: writes it to `returnVar`
	/// (the exit block's ret_val reads it back) and jumps to the exit block.
	void addReturn(IrIndex blockIndex, IrIndex returnValue)
	{
		context.assertf(returnValue.isDefined, "addReturn %s", returnValue);
		IrIndex returnType = context.types.getReturnType(ir.type, context);
		context.assertf(!returnType.isTypeVoid, "Trying to return value from void function");
		writeVariable(blockIndex, returnVar, returnValue);
		addJump(blockIndex);
		addBlockTarget(blockIndex, ir.exitBasicBlock);
	}
745 
	/// Returns from a void function: jumps to the exit block.
	void addReturn(IrIndex blockIndex)
	{
		IrIndex returnType = context.types.getReturnType(ir.type, context);
		context.assertf(returnType.isTypeVoid, "Trying to return void from non-void function");
		addJump(blockIndex);
		addBlockTarget(blockIndex, ir.exitBasicBlock);
	}
753 
	/// Terminates `blockIndex` with an `unreachable` instruction.
	void addUnreachable(IrIndex blockIndex)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);
		context.assertf(!block.isFinished, "%s.%s is already finished", context.idString(ir.name), blockIndex);
		block.isFinished = true;
		emitInstr!(IrOpcode.unreachable)(blockIndex);
	}
761 
	/// Terminates `blockIndex` with an unconditional jump.
	/// Caller is responsible for adding the block target.
	IrIndex addJump(IrIndex blockIndex)
	{
		IrBasicBlock* block = ir.getBlock(blockIndex);
		context.assertf(!block.isFinished, "%s.%s is already finished", context.idString(ir.name), blockIndex);
		block.isFinished = true;
		return emitInstr!(IrOpcode.jump)(blockIndex);
	}
769 
	/// Jumps from `blockIndex` to `label`, materializing the label's block lazily:
	/// the block is only created once the label gets its second predecessor
	/// (see IrLabel docs for the blockIndex/numPredecessors encoding).
	void addJumpToLabel(IrIndex blockIndex, ref IrLabel label)
	{
		if (label.isAllocated)
		{
			// label.blockIndex points to label's own block
			++label.numPredecessors;
			addBlockTarget(blockIndex, label.blockIndex);
			addJump(blockIndex);
		}
		else
		switch (label.numPredecessors)
		{
			case 0:
				// label.blockIndex points to block that started the scope
				// no block was created for label yet
				label.numPredecessors = 1;
				label.blockIndex = blockIndex;
				break;
			case 1:
				// label.blockIndex points to the only predecessor of label block
				// no block was created for label yet
				IrIndex firstPred = label.blockIndex;
				IrIndex secondPred = blockIndex;

				IrIndex labelBlock = addBasicBlock;

				// the deferred jump of the first predecessor is emitted now
				addJump(firstPred);
				addJump(secondPred);
				addBlockTarget(firstPred, labelBlock);
				addBlockTarget(secondPred, labelBlock);

				label.blockIndex = labelBlock;
				label.numPredecessors = 2;
				label.isAllocated = true;
				break;
			default:
				context.unreachable;
		}
	}
809 
	/// Ensures `label` has a real basic block allocated, then adds
	/// `newPredecessors` to its predecessor count (edges added by caller).
	void forceAllocLabelBlock(ref IrLabel label, int newPredecessors = 0)
	{
		if (!label.isAllocated)
		{
			switch (label.numPredecessors)
			{
				case 0:
					// label.blockIndex points to block that started the scope
					// no block was created for label yet
					label.blockIndex = addBasicBlock;
					label.isAllocated = true;
					break;
				case 1:
					// label.blockIndex points to the only predecessor of label block
					// no block was created for label yet
					IrIndex firstPred = label.blockIndex;
					label.blockIndex = addBasicBlock;
					addBlockTarget(firstPred, label.blockIndex);
					addJump(firstPred);
					label.isAllocated = true;
					break;
				default:
					context.unreachable;
			}
		}

		label.numPredecessors += newPredecessors;
	}
838 
	/// Creates virtual register to represent result of phi/instruction
	/// `definition` is phi/instruction that produces a value
	IrIndex addVirtualRegister(IrIndex definition, IrIndex type)
	{
		IrIndex virtRegIndex = appendVirtRegSlot();

		assert(type.isType, format("Invalid type (%s)", type));
		*ir.getVirtReg(virtRegIndex) = IrVirtualRegister(definition, type);

		return virtRegIndex;
	}
850 
	// Checks if already removed
	/// Marks a virtual register as removed; actual storage reclamation is
	/// deferred to `finalizeIr`.
	void removeVirtualRegister(IrIndex virtRegIndex)
	{
		version(IrPrint) writefln("[IR] remove vreg %s", virtRegIndex);
		if (!ir.getVirtReg(virtRegIndex).isRemoved)
			++numRemovedVregs;
		// note: removing register while blockVarDef table contains values of this register is difficult
		// postpone removal until the end of IR construction
		// also, this way we can return memory from removed registers to arena
		// we will do removal after IR construction in `finalizeIr`
		ir.getVirtReg(virtRegIndex).type = virtRegIndex; // mark as removed
	}
863 
	/// Relocates a virtual register to another slot: users and the defining
	/// phi/instruction are redirected, then the register data is copied.
	private void moveVreg(IrIndex fromSlot, IrIndex toSlot) {
		// redirect users
		redirectVregUsersTo(fromSlot, toSlot);
		// redirect definition (phi or instr)
		redirectVregDefinitionTo(fromSlot, toSlot);
		// move data
		version(IrPrint) writefln("[IR] moveVreg %s -> %s", fromSlot, toSlot);
		*ir.getVirtReg(toSlot) = *ir.getVirtReg(fromSlot);
	}
873 
	// Adds phi function to specified block
	/// Creates the phi, its result virtual register, and links the phi at the
	/// head of the block's phi list. Returns the phi index.
	IrIndex addPhi(IrIndex blockIndex, IrIndex type, IrIndex var)
	{
		IrIndex phiIndex = appendPhiSlot;

		IrIndex vreg = addVirtualRegister(phiIndex, type);
		version(IrPrint) writefln("[IR] add %s %s", vreg, phiIndex);
		*ir.getPhi(phiIndex) = IrPhi(blockIndex, vreg, var);
		IrBasicBlock* block = ir.getBlock(blockIndex);
		if (block.firstPhi.isDefined) {
			ir.getPhi(block.firstPhi).prevPhi = phiIndex;
			ir.getPhi(phiIndex).nextPhi = block.firstPhi;
		}
		block.firstPhi = phiIndex;
		return phiIndex;
	}
890 
	// Algorithm 2: Implementation of global value numbering
	/// Returns the last value of the variable in basic block
	/// Unsealed blocks get an operandless phi (completed later by sealBlock);
	/// a single predecessor is read through directly; multiple predecessors get
	/// a phi whose operands are filled immediately (may fold to a trivial value).
	private IrIndex readVariableRecursive(IrIndex blockIndex, IrIndex variable) {
		IrIndex value;
		if (!ir.getBlock(blockIndex).isSealed) {
			// Incomplete CFG
			IrIndex phiIndex = addPhi(blockIndex, getVarType(variable), variable);
			value = ir.getPhi(phiIndex).result;
		}
		else
		{
			IrSmallArray preds = ir.getBlock(blockIndex).predecessors;
			if (preds.length == 1) {
				// Optimize the common case of one predecessor: No phi needed
				value = readVariable(preds[0, ir], variable);
			}
			else
			{
				// Break potential cycles with operandless phi
				IrIndex phiIndex = addPhi(blockIndex, getVarType(variable), variable);
				value = ir.getPhi(phiIndex).result;
				// cache the phi result first so recursive reads terminate
				writeVariable(blockIndex, variable, value);
				value = addPhiOperands(blockIndex, variable, phiIndex);
			}
		}
		with(IrValueKind)
		{
			assert(
				value.kind == constant ||
				value.kind == constantZero ||
				value.kind == virtualRegister ||
				value.kind == physicalRegister, format("%s", value));
		}
		writeVariable(blockIndex, variable, value);
		return value;
	}
927 
	// Adds all values of variable as arguments of phi. Values are gathered from block's predecessors.
	// Returns either φ result virtual register or one of its arguments if φ is trivial
	private IrIndex addPhiOperands(IrIndex blockIndex, IrIndex variable, IrIndex phi)
	{
		version(IrPrint) writefln("[IR] addPhiOperands %s %s %s %s", blockIndex, variable, phi, ir.getPhi(phi).result);
		//dumpFunction(context, ir, "IR gen(addPhiOperands)");
		// Determine operands from predecessors
		foreach (i, IrIndex predIndex; ir.getBlock(blockIndex).predecessors.range(ir))
		{
			// recursive lookup: may itself create phis in predecessor blocks
			IrIndex value = readVariable(predIndex, variable);
			version(IrPrint) writefln("[IR] phi operand %s %s", predIndex, value);
			// Phi should not be cached before loop, since readVariable can add phi to phis, reallocating the array
			addPhiArg(phi, value);
			addUser(phi, value);
		}
		// collapse the phi right away if it merges only one distinct value
		return tryRemoveTrivialPhi(phi);
	}
945 
946 	void addPhiArg(IrIndex phiIndex, IrIndex value)
947 	{
948 		IrPhi* phi = ir.getPhi(phiIndex);
949 		// since we are iterating predecessors in addPhiOperands, appending is correct
950 		phi.args.append(&this, value);
951 		// try to set phi's type if parameter is not a self reference
952 		if (value != phi.result)
953 		{
954 			IrVirtualRegister* resReg = ir.getVirtReg(phi.result);
955 			// type is already set. Check if types match
956 			if (resReg.type.isDefined)
957 			{
958 				// do not test here, because ir to lir pass will produce invalid values at first
959 				//context.assertf(resReg.type == argType,
960 				//	"Types of phi arguments must match %s %s != %s",
961 				//	value, blockIndex, resReg.type);
962 			}
963 			else
964 			{
965 				IrIndex argType = ir.getValueType(context, value);
966 				context.assertf(argType.isType, "Invalid type (%s) of %s", argType, value);
967 				resReg.type = argType;
968 			}
969 		}
970 	}
971 
	// Algorithm 3: Detect and recursively remove a trivial φ function
	// Returns either φ result virtual register or one of its arguments if φ is trivial.
	// A φ is trivial when every argument is either one single value or the φ itself.
	private IrIndex tryRemoveTrivialPhi(IrIndex phiIndex) {
		// skip removed phi (recursion below may revisit already-deleted phis)
		if (ir.getPhi(phiIndex).isRemoved) return IrIndex();

		IrIndex same; // undefined; becomes the single unique non-self argument, if any
		IrIndex phiResultIndex = ir.getPhi(phiIndex).result;
		foreach (size_t i, ref IrIndex phiArg; ir.getPhi(phiIndex).args(ir))
		{
			version(IrPrint) writefln("[IR] arg %s", phiArg);
			if (phiArg == same || phiArg == phiResultIndex) {
				version(IrPrint) writefln("[IR]   same");
				continue; // Unique value or self-reference
			}
			if (same.isDefined) {
				version(IrPrint) writefln("[IR]   %s is non-trivial", phiIndex);
				return phiResultIndex; // The phi merges at least two values: not trivial
			}
			version(IrPrint) writefln("[IR]   same = %s", phiArg);
			same = phiArg;
		}
		version(IrPrint) writefln("[IR]   %s is trivial", phiIndex);
		assert(same.isDefined, "Phi function got no arguments");

		// Remember all users except the phi itself
		assert(phiResultIndex.kind == IrValueKind.virtualRegister, format("%s", phiResultIndex));

		// snapshot the user set before rerouting, so it can be revisited below
		auto users = ir.getVirtReg(phiResultIndex).users;

		// Reroute all uses of phi to same and remove phi
		replaceBy(phiIndex, users, phiResultIndex, same);

		// Update mapping from old phi result to same, since we may need to read
		// this variable in later blocks, which will cause us to read removed phi
		IrIndex maybePhiVar = ir.getPhi(phiIndex).var;
		if (maybePhiVar.isDefined)
		{
			IrIndex blockIndex = ir.getPhi(phiIndex).blockIndex;
			updatePhiVarDefs(blockIndex, maybePhiVar, phiResultIndex, same);
		}

		removePhi(context, ir, phiIndex);

		// Try to recursively remove all phi users, which might have become trivial
		foreach (index, uint numUses; users.range(ir))
			if (index.kind == IrValueKind.phi && index != phiIndex)
				tryRemoveTrivialPhi(index);

		// only marks the vreg as removed; actual reclamation is postponed
		// until finalizeIr (see removeVirtualRegister)
		removeVirtualRegister(phiResultIndex);
		return same;
	}
1024 
1025 	private void updatePhiVarDefs(IrIndex blockIndex, IrIndex var, IrIndex oldValue, IrIndex newValue)
1026 	{
1027 		version(IrPrint) writefln("[IR]   updatePhiVarDefs %s %s %s: %s", blockIndex, var, newValue, blockVarDef);
1028 		if (auto val = BlockVarPair(blockIndex, var) in blockVarDef)
1029 		{
1030 			if (*val == oldValue)
1031 			{
1032 				version(IrPrint) writefln("[IR]   phi update blockVarDef %s %s %s -> %s", blockIndex, var, *val, newValue);
1033 				*val = newValue;
1034 
1035 				foreach (i, succIndex; ir.getBlock(blockIndex).successors.range(ir)) {
1036 					updatePhiVarDefs(succIndex, var, oldValue, newValue);
1037 				}
1038 			}
1039 		}
1040 		version(IrPrint) writefln("[IR]   updatePhiVarDefs %s %s %s: %s", blockIndex, var, newValue, blockVarDef);
1041 	}
1042 
1043 	IrIndex definitionOf(IrIndex someIndex)
1044 	{
1045 		final switch (someIndex.kind) with(IrValueKind) {
1046 			case none: assert(false);
1047 			case array: assert(false);
1048 			case instruction: return someIndex;
1049 			case basicBlock: assert(false);
1050 			case constant: assert(false);
1051 			case constantAggregate: assert(false);
1052 			case constantZero: assert(false);
1053 			case global: assert(false);
1054 			case phi: return someIndex;
1055 			case func: assert(false); // TODO
1056 			case stackSlot: assert(false); // TODO
1057 			case virtualRegister: return ir.getVirtReg(someIndex).definition;
1058 			case physicalRegister: assert(false);
1059 			case type: assert(false);
1060 			case variable: assert(false);
1061 		}
1062 	}
1063 
1064 	/// Replaces all 'vreg' uses with `redirectTo`
1065 	void redirectVregUsersTo(IrIndex vreg, IrIndex redirectTo) {
1066 		context.assertf(vreg.isVirtReg, "'vreg' must be virtual register, not %s", vreg.kind);
1067 		version(IrPrint) writefln("[IR] redirectVregUsersTo %s -> %s", vreg, redirectTo);
1068 
1069 		auto users = ir.getVirtReg(vreg).users;
1070 		foreach (IrIndex userIndex, uint numUses; users.range(ir))
1071 		{
1072 			switch (userIndex.kind) with(IrValueKind) {
1073 				case instruction:
1074 					foreach (ref IrIndex arg; ir.getInstr(userIndex).args(ir))
1075 						if (arg == vreg) {
1076 							arg = redirectTo;
1077 							addUser(userIndex, redirectTo);
1078 						}
1079 					break;
1080 				case phi:
1081 					foreach (size_t i, ref IrIndex phiArg; ir.getPhi(userIndex).args(ir))
1082 						if (phiArg == vreg) {
1083 							phiArg = redirectTo;
1084 							addUser(userIndex, redirectTo);
1085 						}
1086 					break;
1087 				default: assert(false);
1088 			}
1089 		}
1090 	}
1091 
1092 	/// Redirects `vreg` definition to point to `redirectTo`
1093 	void redirectVregDefinitionTo(IrIndex vreg, IrIndex redirectTo) {
1094 		IrIndex definition = ir.getVirtReg(vreg).definition;
1095 		//writefln("%s %s -> %s", definition, vreg, redirectTo);
1096 		switch (definition.kind) {
1097 			case IrValueKind.phi: ir.getPhi(definition).result = redirectTo; break;
1098 			case IrValueKind.instruction: ir.getInstr(definition).result(ir) = redirectTo; break;
1099 			default: context.internal_error("Invalid definition %s of %s", definition.kind, vreg);
1100 		}
1101 	}
1102 
1103 	// ditto
1104 	/// Rewrites all users of phi to point to `byWhat` instead of its result `what`.
1105 	/// `what` is the result of phi (vreg), `phiUsers` is users of `what`
1106 	private void replaceBy(IrIndex phiIndex, IrSmallSet phiUsers, IrIndex what, IrIndex byWhat) {
1107 		version(IrPrint) writefln("[IR]     replaceBy %s %s -> %s", phiIndex, what, byWhat);
1108 
1109 		foreach (IrIndex phiUserIndex, uint numUses; phiUsers.range(ir))
1110 		{
1111 			version(IrPrint) writefln("[IR]     user %s %s", i, phiUserIndex);
1112 
1113 			// skip self-reference (we will delete phi anyway)
1114 			if (phiUserIndex == phiIndex) continue;
1115 
1116 			final switch (phiUserIndex.kind) with(IrValueKind) {
1117 				case none: assert(false);
1118 				case array: assert(false);
1119 				case instruction:
1120 					foreach (ref IrIndex arg; ir.getInstr(phiUserIndex).args(ir))
1121 						if (arg == what)
1122 						{
1123 							arg = byWhat;
1124 							replaceUserWith(byWhat, phiIndex, phiUserIndex);
1125 						}
1126 					break;
1127 				case basicBlock: assert(false);
1128 				case constant, constantAggregate, constantZero: assert(false);
1129 				case global: assert(false);
1130 				case phi:
1131 					if (ir.getPhi(phiUserIndex).isRemoved) continue; // skip removed phi
1132 					foreach (size_t i, ref IrIndex phiArg; ir.getPhi(phiUserIndex).args(ir))
1133 					{
1134 						if (phiArg == what)
1135 						{
1136 							phiArg = byWhat;
1137 							replaceUserWith(byWhat, phiIndex, phiUserIndex);
1138 						}
1139 					}
1140 					break;
1141 				case stackSlot: assert(false); // TODO
1142 				case virtualRegister: assert(false);
1143 				case physicalRegister: assert(false);
1144 				case type: assert(false);
1145 				case variable: assert(false);
1146 				case func: assert(false);
1147 			}
1148 		}
1149 	}
1150 
	// In the user list of value `used`, replaces user `what` with user `byWhat`.
	// Called after an argument slot of `byWhat` was rewritten from `what`'s value
	// to `used` (see replaceBy), so `used` must now track `byWhat` as a user.
	private void replaceUserWith(IrIndex used, IrIndex what, IrIndex byWhat) {
		// If argument is used once, then user appears only once.
		// When replacing users with phi users, replacement will occur only for first phi user.
		// Other phi users will not find any users to replace.
		// So add append users instead if no replacement was done.
		void replaceVregUser(IrVirtualRegister* vreg) {
			uint numReplaced = vreg.users.replace(ir, what, byWhat);
			if (numReplaced == 0) vreg.users.put(&this, byWhat);
		}
		final switch (used.kind) with(IrValueKind) {
			case none, array, basicBlock, physicalRegister: assert(false);
			// instructions and phis track users on their result vreg
			case instruction: return replaceVregUser(ir.getVirtReg(ir.getInstr(used).result(ir)));
			case constant, constantAggregate, constantZero: return; // constants dont track individual users
			case global: return; // globals dont track individual users
			case phi: return replaceVregUser(ir.getVirtReg(ir.getPhi(used).result));
			case stackSlot: assert(false); // TODO
			case virtualRegister: return replaceVregUser(ir.getVirtReg(used));
			case type: return; // no user tracking
			case variable: assert(false);
			case func: return; // no user tracking
		}
	}
1174 }