/// Copyright: Copyright (c) 2017-2020 Andrey Penechko.
/// License: $(WEB boost.org/LICENSE_1_0.txt, Boost License 1.0).
/// Authors: Andrey Penechko.

/// Virtual machine for IR interpretation
module vox.ir.ir_vm;

import vox.all;

/// Offset and length into CompilationContext.vmBuffer arena
struct IrVmSlotInfo
{
	uint offset;
	uint length;
}

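/// Interprets the IR of a single function.
/// Typical usage, a sketch mirroring the recursive call handling in `run`
/// (`c`, `irData` and `returnMem` are assumed to be prepared by the caller,
/// and filling the parameter slots is the caller's job):
/// ---
/// IrVm vm = IrVm(c, irData);
/// vm.pushFrame;
/// foreach(uint index, IrVmSlotInfo slot; vm.parameters)
/// {
/// 	// copy the value of argument `index` into `slot`
/// }
/// vm.run(returnMem); // returnMem receives the value returned via ret_val
/// vm.popFrame;
/// ---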
struct IrVm
{
	/// Compilation context. Its vmBuffer arena holds the memory
	/// backing stack slots and virtual registers
	CompilationContext* c;
	/// Function being interpreted
	IrFunction* func;
	/// Maps each virtual register and stack slot to its offset within the frame, relative to frameOffset.
	/// An extra item is appended that points past the last item
	uint* vmSlotOffsets;
	uint frameOffset;
	uint frameSize;

	void pushFrame()
	{
		if (!func.vmSlotOffsets)
		{
			// compute frame layout: offset table entries for virtual registers and stack slots
			func.vmSlotOffsets = c.allocateTempArray!uint(func.numVirtualRegisters+func.numStackSlots+1).ptr;
			uint offset = 0;
			func.vmSlotOffsets[0] = offset;
			//writefln("vreg %s %s", 0, offset);
			foreach(i; 0..func.numVirtualRegisters)
			{
				offset += c.types.typeSize(func.vregPtr[i].type);
				//writefln("vreg %s %s", i+1, offset);
				func.vmSlotOffsets[i+1] = offset;
			}
			foreach(i; 0..func.numStackSlots)
			{
				offset += c.types.typeSize(c.types.getPointerBaseType(func.stackSlotPtr[i].type));
				//writefln("slot %s %s", i+1, offset);
				func.vmSlotOffsets[func.numVirtualRegisters + i+1] = offset;
			}
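			// Offsets are cumulative: slot i occupies bytes [vmSlotOffsets[i], vmSlotOffsets[i+1])
			// of the frame, so the final entry equals the total frame size stored below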
			func.frameSize = offset;
		}
		vmSlotOffsets = func.vmSlotOffsets;
		frameSize = func.frameSize;

		frameOffset = c.pushVmStack(frameSize).offset;
		//writefln("pushFrame %s %s %s", frameSize, frameOffset, c.vmBuffer.uintLength);
	}

	void popFrame()
	{
		c.popVmStack(IrVmSlotInfo(frameOffset, frameSize));
	}

	ParameterSlotIterator parameters() return {
		return ParameterSlotIterator(&this);
	}

	IrVmSlotInfo vregSlot(IrIndex vreg) {
		assert(vreg.isVirtReg);
		uint from = frameOffset + vmSlotOffsets[vreg.storageUintIndex];
		uint to = frameOffset + vmSlotOffsets[vreg.storageUintIndex+1];
		return IrVmSlotInfo(from, to - from);
	}

	IrVmSlotInfo stackSlotSlot(IrIndex slot) {
		assert(slot.isStackSlot);
		uint from = frameOffset + vmSlotOffsets[func.numVirtualRegisters + slot.storageUintIndex];
		uint to = frameOffset + vmSlotOffsets[func.numVirtualRegisters + slot.storageUintIndex+1];
		return IrVmSlotInfo(from, to - from);
	}

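	/// Returns the bytes of `slot` as a slice into the vmBuffer arena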
	ubyte[] slotToSlice(IrVmSlotInfo slot)
	{
		return c.vmBuffer.bufPtr[slot.offset..slot.offset+slot.length];
	}

	/// outputMem : Memory that holds the return value
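	/// A sketch of the calling pattern (mirroring the recursive call handling
	/// below; `callerVm`, `calleeVm` and `callResultVreg` are illustrative names):
	/// ---
	/// IrVmSlotInfo returnMem = callerVm.vregSlot(callResultVreg);
	/// calleeVm.run(returnMem); // filled when ret_val executes
	/// ---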
	void run(IrVmSlotInfo outputMem)
	{
		++c.numCtfeRuns;
		IrIndex curBlock = func.entryBasicBlock;
		IrIndex prevBlock;
		IrBasicBlock* block = func.getBlock(curBlock);

		block_loop:
		while (true)
		{
			if (prevBlock.isDefined) {
				uint predIndex = block.predecessors.findFirst(func, prevBlock);
				foreach(IrIndex phiIndex, ref IrPhi phi; block.phis(func))
				{
					IrIndex phiArg = phi.args[predIndex, func];
					copyToVreg(phi.result, phiArg);
				}
			}

			instr_loop:
			foreach(IrIndex instrIndex, ref IrInstrHeader instrHeader; block.instructions(func))
			{
				switch(instrHeader.op)
				{
					case IrOpcode.parameter:
						// Parameter slots are filled by the caller (see ParameterSlotIterator),
						// so there is nothing to do here
						uint paramIndex = func.getInstr(instrIndex).extraPayload(func, 1)[0].asUint;
						//writefln("param %s", paramIndex);
						break;
					case IrOpcode.jump:
						prevBlock = curBlock;
						curBlock = block.successors[0, func];
						//writefln("jump %s -> %s", prevBlock, curBlock);
						block = func.getBlock(curBlock);
						break instr_loop;

					case IrOpcode.ret_val:
						//writefln("ret %s", instrHeader.arg(func, 0));
						copyToMem(outputMem, instrHeader.arg(func, 0));
						break block_loop;

					case IrOpcode.add:  binop!"+"(instrHeader); break;
					case IrOpcode.sub:  binop!"-"(instrHeader); break;
					case IrOpcode.and:  binop!"&"(instrHeader); break;
					case IrOpcode.or:   binop!"|"(instrHeader); break;
					case IrOpcode.xor:  binop!"^"(instrHeader); break;
					case IrOpcode.umul: binop!"*"(instrHeader); break;
					case IrOpcode.smul: binop!"*"(instrHeader); break;
					case IrOpcode.udiv: binop!"/"(instrHeader); break;
					case IrOpcode.sdiv: binop!"/"(instrHeader); break;
					case IrOpcode.shl:  binop!"<<"(instrHeader); break;
					case IrOpcode.lshr: binop!">>>"(instrHeader); break;
					case IrOpcode.ashr: binop!">>"(instrHeader); break;

					case IrOpcode.not:
						long a = readValue(instrHeader.arg(func, 0)).i64;
						long result = !a;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.neg:
						long a = readValue(instrHeader.arg(func, 0)).i64;
						long result = -a;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.conv:
						long result = readValue(instrHeader.arg(func, 0)).i64;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.zext:
						long result = readValue(instrHeader.arg(func, 0)).i64;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.sext:
						long result = readValue(instrHeader.arg(func, 0)).i64;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.trunc:
						long result = readValue(instrHeader.arg(func, 0)).i64;
						writeInt(instrHeader.result(func), result);
						break;

					case IrOpcode.store:
						IrVmSlotInfo memSlot = ptrToSlice(instrHeader.arg(func, 0));
						copyToMem(memSlot, instrHeader.arg(func, 1));
						break;

					case IrOpcode.load:
						IrVmSlotInfo sourceSlot = ptrToSlice(instrHeader.arg(func, 0));
						IrVmSlotInfo resultSlot = vregSlot(instrHeader.result(func));
						slotToSlice(resultSlot)[] = slotToSlice(sourceSlot);
						break;

					case IrOpcode.branch_binary:
						IrIndex type = getValueType(instrHeader.arg(func, 0), func, c);
						IrConstVal a = readValue(instrHeader.arg(func, 0));
						IrConstVal b = readValue(instrHeader.arg(func, 1));
						//writefln("br %s %s : %s %s", a, b, block.successors[0, func], block.successors[1, func]);
						bool result;
						final switch(cast(IrBinaryCondition)instrHeader.cond)
						{
							case IrBinaryCondition.eq:  result = a.u64 == b.u64; break;
							case IrBinaryCondition.ne:  result = a.u64 != b.u64; break;

							case IrBinaryCondition.ugt:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.u8  > b.u8;  break;
									case IrBasicType.i16: result = a.u16 > b.u16; break;
									case IrBasicType.i32: result = a.u32 > b.u32; break;
									case IrBasicType.i64: result = a.u64 > b.u64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.uge:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.u8  >= b.u8;  break;
									case IrBasicType.i16: result = a.u16 >= b.u16; break;
									case IrBasicType.i32: result = a.u32 >= b.u32; break;
									case IrBasicType.i64: result = a.u64 >= b.u64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.ult:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.u8  < b.u8;  break;
									case IrBasicType.i16: result = a.u16 < b.u16; break;
									case IrBasicType.i32: result = a.u32 < b.u32; break;
									case IrBasicType.i64: result = a.u64 < b.u64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.ule:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.u8  <= b.u8;  break;
									case IrBasicType.i16: result = a.u16 <= b.u16; break;
									case IrBasicType.i32: result = a.u32 <= b.u32; break;
									case IrBasicType.i64: result = a.u64 <= b.u64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;

							case IrBinaryCondition.sgt:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.i8  > b.i8;  break;
									case IrBasicType.i16: result = a.i16 > b.i16; break;
									case IrBasicType.i32: result = a.i32 > b.i32; break;
									case IrBasicType.i64: result = a.i64 > b.i64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.sge:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.i8  >= b.i8;  break;
									case IrBasicType.i16: result = a.i16 >= b.i16; break;
									case IrBasicType.i32: result = a.i32 >= b.i32; break;
									case IrBasicType.i64: result = a.i64 >= b.i64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.slt:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.i8  < b.i8;  break;
									case IrBasicType.i16: result = a.i16 < b.i16; break;
									case IrBasicType.i32: result = a.i32 < b.i32; break;
									case IrBasicType.i64: result = a.i64 < b.i64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.sle:
								switch(type.basicType(c)) {
									case IrBasicType.i8:  result = a.i8  <= b.i8;  break;
									case IrBasicType.i16: result = a.i16 <= b.i16; break;
									case IrBasicType.i32: result = a.i32 <= b.i32; break;
									case IrBasicType.i64: result = a.i64 <= b.i64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;

							case IrBinaryCondition.fgt:
								switch(type.basicType(c)) {
									case IrBasicType.f32: result = a.f32 > b.f32; break;
									case IrBasicType.f64: result = a.f64 > b.f64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.fge:
								switch(type.basicType(c)) {
									case IrBasicType.f32: result = a.f32 >= b.f32; break;
									case IrBasicType.f64: result = a.f64 >= b.f64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.flt:
								switch(type.basicType(c)) {
									case IrBasicType.f32: result = a.f32 < b.f32; break;
									case IrBasicType.f64: result = a.f64 < b.f64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
							case IrBinaryCondition.fle:
								switch(type.basicType(c)) {
									case IrBasicType.f32: result = a.f32 <= b.f32; break;
									case IrBasicType.f64: result = a.f64 <= b.f64; break;
									default: c.internal_error("%s", type.basicType(c));
								}
								break;
						}
						prevBlock = curBlock;
						if (result)
							curBlock = block.successors[0, func];
						else
							curBlock = block.successors[1, func];
						//writefln("jump %s -> %s", prevBlock, curBlock);
						block = func.getBlock(curBlock);
						break instr_loop;

					case IrOpcode.branch_unary:
						long a = readValue(instrHeader.arg(func, 0)).i64;
						//writefln("br %s : %s %s", a, block.successors[0, func], block.successors[1, func]);
						bool result;
						final switch(cast(IrUnaryCondition)instrHeader.cond)
						{
							case IrUnaryCondition.zero:     result = a == 0; break;
							case IrUnaryCondition.not_zero: result = a != 0; break;
						}
						prevBlock = curBlock;
						if (result)
							curBlock = block.successors[0, func];
						else
							curBlock = block.successors[1, func];
						//writefln("jump %s -> %s", prevBlock, curBlock);
						block = func.getBlock(curBlock);
						break instr_loop;

					case IrOpcode.call:
						IrIndex callee = instrHeader.arg(func, 0);
						if (!callee.isFunction)
						{
							writefln("call of %s is not implemented", callee);
							assert(false);
						}

						FunctionDeclNode* calleeFunc = c.getFunction(callee);
						AstIndex calleeIndex = AstIndex(callee.storageUintIndex);
						force_callee_ir_gen(calleeFunc, calleeIndex, c);
						if (calleeFunc.state != AstNodeState.ir_gen_done)
							c.internal_error(calleeFunc.loc,
								"Function's IR is not yet generated");

						IrVmSlotInfo returnMem;
						if (instrHeader.hasResult) {
							IrIndex result = instrHeader.result(func);
							returnMem = vregSlot(result);
						}

						if (calleeFunc.isBuiltin) {
							IrIndex result = eval_call_builtin(TokenIndex(), &this, calleeIndex, instrHeader.args(func)[1..$], c);
							constantToMem(slotToSlice(returnMem), result, c);
							break;
						}

						IrFunction* irData = c.getAst!IrFunction(calleeFunc.backendData.irData);
						IrVm vm = IrVm(c, irData);
						vm.pushFrame;
						foreach(uint index, IrVmSlotInfo slot; vm.parameters)
						{
							copyToMem(slot, instrHeader.arg(func, index+1)); // +1 skips the callee, which is arg 0
						}
						vm.run(returnMem);
						vm.popFrame;
						break;

					default:
						c.internal_error("interpret %s", cast(IrOpcode)instrHeader.op);
				}
			}
		}
	}

	void binop(string op)(ref IrInstrHeader instrHeader)
	{
		long a = readValue(instrHeader.arg(func, 0)).i64;
		long b = readValue(instrHeader.arg(func, 1)).i64;
		mixin(`long result = a `~op~` b;`);
		writeInt(instrHeader.result(func), result);
	}

	IrConstVal readValue(IrIndex index)
	{
		switch (index.kind) with(IrValueKind) {
			case constant, constantZero: return c.constants.get(index).value;
			case virtualRegister: return readValue(vregSlot(index));
			default: c.internal_error("readValue %s", index);
		}
	}

	IrConstVal readValue(IrVmSlotInfo mem)
	{
		switch(mem.length)
		{
			case 1: return IrConstVal(*cast( ubyte*)slotToSlice(mem).ptr);
			case 2: return IrConstVal(*cast(ushort*)slotToSlice(mem).ptr);
			case 4: return IrConstVal(*cast(  uint*)slotToSlice(mem).ptr);
			case 8: return IrConstVal(*cast( ulong*)slotToSlice(mem).ptr);
			default: c.internal_error("readValue %s", mem);
		}
	}

	IrVmSlotInfo ptrToSlice(IrIndex ptr)
	{
		IrIndex ptrType = getValueType(ptr, func, c);
		IrIndex ptrMemType = c.types.getPointerBaseType(ptrType);
		uint targetSize = c.types.typeSize(ptrMemType);
		switch (ptr.kind) with(IrValueKind) {
			case constant, constantZero: return IrVmSlotInfo(c.constants.get(ptr).u32, targetSize);
			case virtualRegister: return IrVmSlotInfo(readValue(vregSlot(ptr)).u32, targetSize);
			case stackSlot: return stackSlotSlot(ptr);
			default: c.internal_error("ptrToSlice %s", ptr);
		}
	}

	void writeInt(IrIndex index, long value)
	{
		IrVmSlotInfo mem = vregSlot(index);
		switch(mem.length)
		{
			case 1: *cast( byte*)slotToSlice(mem).ptr = cast( byte)value; break;
			case 2: *cast(short*)slotToSlice(mem).ptr = cast(short)value; break;
			case 4: *cast(  int*)slotToSlice(mem).ptr = cast(  int)value; break;
			case 8: *cast( long*)slotToSlice(mem).ptr = cast( long)value; break;
			default: c.internal_error("writeInt %s", mem);
		}
	}

	void copyToVreg(IrIndex vreg, IrIndex value)
	{
		copyToMem(vregSlot(vreg), value);
	}

	void copyToMem(IrVmSlotInfo mem, IrIndex value)
	{
		//writefln("copyToMem %s %s", mem.length, value);
		if (value.isSomeConstant)
		{
			constantToMem(slotToSlice(mem), value, c);
		}
		else if (value.isVirtReg)
		{
			slotToSlice(mem)[] = slotToSlice(vregSlot(value));
		}
		else
		{
			writefln("%s", value);
			assert(false);
		}
	}
}

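/// Iterates the parameter instructions of the function's entry block and yields
/// each parameter's frame slot. Usage sketch (mirroring the call handling in
/// IrVm.run; producing the argument values is up to the caller):
/// ---
/// foreach(uint index, IrVmSlotInfo slot; vm.parameters)
/// {
/// 	// fill `slot` with the value of argument `index`
/// }
/// ---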
struct ParameterSlotIterator
{
	IrVm* vm;
	int opApply(scope int delegate(uint, IrVmSlotInfo) dg)
	{
		IrBasicBlock* block = vm.func.getBlock(vm.func.entryBasicBlock);
		uint index;
		foreach(IrIndex instrIndex, ref IrInstrHeader instrHeader; block.instructions(vm.func))
		{
			if (instrHeader.op == IrOpcode.parameter)
			{
				IrIndex vreg = instrHeader.result(vm.func);
				auto slot = vm.vregSlot(vreg);
				if (int res = dg(index, slot))
					return res;
			}
			++index;
		}
		return 0;
	}
}