; Chiptunes.git / foo.S
; (web-scrape header preserved: commit 55718df70740ef24dfed2b502924b5e8c2f49705)
1 /* REGISTER NAMES */
2 #define zero r16 // always holds 0 (cleared once in main)
3 #define acc r17 // sample accumulator; written to OCR0AL each interrupt
4 #define i0 r18 // i3:i2:i1:i0 = 32-bit sample counter, byte 0 (LSB)
5 #define i1 r19 //  -"- byte 1
6 #define i2 r20 //  -"- byte 2
7 #define i3 r21 //  -"- byte 3 (MSB)
8 #define n r22 // per-sample value derived from i2/i1 in sample()
9 #define s r23 // per-sample value derived from i3/i2 in sample()
10 #define _ r24 // shared scratch; locally renamed via #define tmp / #define one
11 ; r25
12 #define x r26 //==Xlo==Mh
13 #define t r27 //==Xhi==Ml
14 ; r28
15 ; r29
16 ; r30 Zlo
17 ; r31 Zhi
18 ; aliases:
; NOTE: x/t deliberately overlap the X pointer (r27:r26) and the mod3
; operand pair Mh:Ml -- the same two registers under three names.
19 #define Xlo r26
20 #define Xhi r27
21 #define Mh r26 //mod3 vars
22 #define Ml r27 // -"-
23
24 /* I/O REGISTERS */
; NOTE(review): these addresses match the ATtiny4/5/9/10 I/O map --
; confirm against the datasheet of the actual target part.
25 OCR0AL = 0x26 ; Timer/Counter0 output compare A low byte (PWM duty)
26 DDRB = 0x01 ; port B data direction
27 PORTB = 0x02 ; port B output
28 PUEB = 0x03 ; port B pull-up enable
29 SPL = 0x3D ; stack pointer low
30 SPH = 0x3E ; stack pointer high
31 CCP = 0x3C ; configuration change protection (write 0xD8 to unlock)
32 CLKPSR = 0x36 ; clock prescaler (CCP-protected)
33 WDTCSR = 0x31 ; watchdog control (CCP-protected)
34 SMCR = 0x3A ; sleep mode control
35 TCCR0A = 0x2E ; Timer/Counter0 control A
36 TCCR0B = 0x2D ; Timer/Counter0 control B
37 TIMSK0 = 0x2B ; Timer/Counter0 interrupt mask
38 TIFR0 = 0x2A ; Timer/Counter0 interrupt flags
39
40 .section .data
; 16-byte lookup table read by g(); index 0..7 or 8..15 (the +8 half is
; selected in g() when (i2 & 3) != 0). The bytes are the multiplier
; constants hard-coded into the mul decision tree (0xB0/0x58/0x84/...).
; NOTE(review): .data normally relies on startup code copying it into
; SRAM; no such copy loop is visible in this file -- confirm the table
; actually ends up at lo8(data) at runtime.
41 data:
42 .byte 0x84, 0x9d, 0xb0, 0x69, 0x9d, 0x84, 0x69, 0x58
43 .byte 0x75, 0x8c, 0xb0, 0x69, 0x8c, 0x75, 0x69, 0x58
44
45 .section .text
; interrupt vector table
; NOTE(review): .org takes a BYTE offset in GNU as -- confirm that
; byte offset 0x0008 is the TIM0_OVF vector slot for this device.
46 .org 0x0000 ; RESET interrupt
47 	RJMP main
48 .org 0x0008 ; TIM0_OVF interrupt
49 	RJMP sample
50
;-----------------------------------------------------------------------
; mod3: t <- (Mh:Ml) mod 3
; In:    Mh:Ml (r26:r27) = 16-bit operand
; Out:   t (r27, aliases Ml) = operand mod 3, in 0..2
; Clobb: Mh, _ (tmp), flags
; Method: repeated digit folding. 256 = 1 (mod 3), 16 = 1 (mod 3) and
; 4 = 1 (mod 3), so summing the base-256 / base-16 / base-4 digits
; preserves the value mod 3; each fold shrinks the range until a single
; conditional subtract finishes the reduction.
;-----------------------------------------------------------------------
mod3: ; mod3(Mh.Ml) -> t
#define tmp _
	; byte fold: Ml += Mh, keep the carry in Mh (0 or 1)
	ADD Ml, Mh
	CLR Mh
	ADC Mh, zero
	; nibble fold: Ml = loNibble + hiNibble + 16*carry  (<= 46)
	MOV tmp, Ml
	SWAP tmp
	ANDI tmp, 0x0f        ; tmp = Ml >> 4
	SWAP Mh               ; Mh = carry << 4
	OR tmp, Mh
	ANDI Ml, 0x0f
	ADD Ml, tmp
	; base-4 fold #1: Ml = (Ml & 3) + (Ml >> 2)  (<= 14)
	MOV tmp, Ml
	LSR tmp
	LSR tmp
	ANDI Ml, 0x03
	ADD Ml, tmp
	; base-4 fold #2: Ml now in 0..5
	MOV tmp, Ml
	LSR tmp
	LSR tmp
	ANDI Ml, 0x03
	ADD Ml, tmp
	; final reduction: subtract 3 iff Ml >= 3
	CPI Ml, 3
	BRLO skip             ; BUGFIX: was BRPL, i.e. the branch sense was
	                      ; inverted: it skipped the subtract for Ml >= 3
	                      ; (returning 3..5) and underflowed for Ml < 3
	SUBI Ml, 3
skip:
	RET
#undef tmp
79
80 ; definitions to make the mul-tree readable:
; a2:a1 is the 16-bit product accumulator used by the decision tree.
81 #define a1 x
82 #define a2 _
; always/never: documentation-only no-ops marking multiplier bits that
; are known to be 1 (always) or 0 (never) on this tree branch.
83 .macro always _bit ; nop; for when a test() is not necessary (see tree)
84 .endm
85 .macro never _bit ; nop; for when a test() is not necessary (see tree)
86 .endm
; test: branch to \_jmpto when bit \_bit of t (the multiplier byte) is set.
87 .macro test _bit,_jmpto
88 	SBRC t, \_bit
89 	RJMP \_jmpto
90 .endm
; shift16: one full product step -- 16-bit right shift of a2:a1.
91 .macro shift16
92 	LSR a2
93 	ROR a1
94 .endm
95 .macro shift8 ; top three bits don't need to be correct, so save cycles by not carrying
96 	LSR a1
97 .endm
98 .macro shift0 ; nop; last shift is common
99 .endm
; add16: accumulate the 16-bit multiplicand i1:i0 into a2:a1.
100 .macro add16
101 	ADD a1, i0
102 	ADC a2, i1
103 .endm
104 .macro add8 ; ditto, but without carrying into a2 (accuracy not needed)
105 	ADD a1, i0
106 .endm
107 #undef a2
108 #undef a1
109
;-----------------------------------------------------------------------
; g: t <- wave(i, t)
; 1) folds t to a table index: (t & 7), +8 when (i2 & 3) != 0
; 2) looks the index up in data[]
; 3) multiplies the table byte by the counter word i1:i0 with a
;    hard-coded shift/add decision tree, keeping (approximately -- see
;    shift8) the high byte of the product in t
; In:    t = phase value; i0..i2 read
; Out:   t = result byte
; Clobb: x (r26), _ (r24), X pointer, flags
;-----------------------------------------------------------------------
110 g: ; g(i, t) -> t
111 #define tmp _
112 	ANDI t, 0x07 ; base index = t mod 8
113 	MOV tmp, i2
114 	ANDI tmp, 3
115 	CPSE tmp, zero ; if (i2 & 3) != 0 ...
116 	SUBI t, -8 ; ... index += 8: use the second half of data[]
117 #undef tmp
118
119 ;TODO: check correctness!
120 #define tmp _
121 	MOV tmp, t ; NOTE: must move value away from `t`, as that is also hi(X)
122 	LDI Xhi, hi8(data) ; hi(data) always zero, but still need to clear the register
123 	LDI Xlo, lo8(data)
124 	ADD Xlo, tmp ;<-- the offset (formerly `t`) into data[]
125 	;ADC Xhi, zero ; data == 0x40 t <= 0x10, so can never overflow
126 	LD tmp, X ; t = data[index]
127 	MOV t, tmp
128 #undef tmp
129
130 #define a1 x
131 #define a2 _
132 #define a0 t
133 	CLR a2 ; clear the 16-bit product accumulator a2:a1
134 	CLR a1
135
136 /* decision tree multiplication saves cycles and (hopefully) reduces code size
137 _xxx?
138 / \
139 _xx?0 _xx1?
140 | |
141 _x?00 _x?01
142 / \ / \
143 _?000 _?100 _?001 _?101
144 / \ / \ | / \
145 _0000 _1000 _0100 _1100 _1001 _0101 _1101
146 | | | | | | |
147 ... ... ... ... ... ... ...
148 | | | | | | |
149 B0 58 84 8C 69 75 9D */
; Each branch below is a shift/add multiply specialized for one of the
; seven table constants; the bit pattern that selected the branch is
; encoded in its label (m_1000 = multiplier bits 3..0 are 1000, etc.).
150 	test 0, m____1
151 m____0: shift16
152 	never 1
153 m___00: shift16
154 	test 2, m__100
155 m__000: shift16
156 	test 3, m_1000
157 m_0000: shift16
158 	always 4
159 	add16 $ shift16
160 	always 5
161 	add8 $ shift8
162 	never 6
163 	shift8
164 	always 7
165 	add8 $ shift0
166 	RJMP end_mul ; calc'd 0xb0
167
168 m_1000: add16 $ shift16
169 	always 4
170 	add16 $ shift16
171 	never 5
172 	shift8
173 	always 6
174 	add8 $ shift8
175 	never 7
176 	shift0
177 	RJMP end_mul ; calc'd 0x58
178
179 m__100: add16 $ shift16
180 	test 3, m_1100
181 m_0100: shift16
182 	RJMP upper_8 ;'ll calc 0x84
183 ;TODO: combine shift16 above with add_shift16 below to save progmem
184 m_1100: add16 $ shift16
185 upper_8: ; used twice, so deduplicated
186 	never 4
187 	shift16
188 	never 5
189 	shift8
190 	never 6
191 	shift8
192 	always 7
193 	add8 $ shift0
194 	RJMP end_mul ; calc'd 0x8c
195
196 m____1: add16 $ shift16
197 	never 1
198 m___01: shift16
199 	test 2, m__101
200 m__001: shift16
201 	always 3
202 m_1001: add16 $ shift16
203 	never 4
204 	shift16
205 	always 5
206 	add8 $ shift8
207 	always 6
208 	add8 $ shift8
209 	never 7
210 	shift0
211 	RJMP end_mul ; calc'd 0x69
212
213 m__101: add16 $ shift16
214 	test 3, m_1101
215 m_0101: shift16
216 	always 4
217 	add16 $ shift16
218 	always 5
219 	add8 $ shift8
220 	always 6
221 	add8 $ shift8
222 	never 7
223 	shift0
224 	RJMP end_mul ; calc'd 0x75
225
226 m_1101: add16 $ shift16
227 	always 4
228 	add16 $ shift16
229 	never 5
230 	shift8
231 	never 6
232 	shift8
233 	always 7
234 	add8 $ shift0
235 	; calc'd 0x9d -- falls through into end_mul
236
237 end_mul:
238 	LSR a1 ;final shift is a common operation for all
239
240 	MOV t, a1 ;;TODO: use a1 in main() directly
241 #undef a0
242 #undef a1
243 #undef a2
244 	RET ; TODO: replace CALL/RET with IJMP?
245
246 main: ; setup routine: registers, stack, clock, watchdog, Timer0 PWM
247 	CLR zero ; the `zero` register stays 0 from here on
248 	CLR i0
249 	CLR i1 ; i3:i2:i1:i0 = 0 (32-bit sample counter)
250 	CLR i2
251 	CLR i3
252 	CLR acc ; we output a dummy sample before the actual first one
253
254 #define one _
255 	LDI one, 1
256 	LDI x, 0x5f ; RAMEND
257 	OUT SPL, x ; init stack ptr
258 	OUT SPH, zero ; -"-
259 	OUT PUEB, zero ; disable pullups
260 	LDI x, 0x05
261 	OUT DDRB, x ; PORTB0:pwm, PORTB2:debug
262 	LDI x, 0xd8
263 	OUT CCP, x ; change protected ioregs
; NOTE(review): a CCP signature arms protected-I/O writes for only a few
; cycles -- confirm both the CLKPSR and the WDTCSR write below fit in one
; window (some parts require a separate CCP write per protected register).
264 	OUT CLKPSR, one ; clock prescaler 1/2 (4Mhz)
265 	OUT WDTCSR, zero; turn off watchdog ;;TODO: incomplete - see datasheet pg48
266 ; OUT SMCR, 2 ; sleep mode 'power down' ('idle' (default) has faster response time)
267
268 ;set timer/counter0 to 8bit fastpwm, non-inverting, no prescaler
269 	LDI x, 0x81
270 	OUT TCCR0A, x
271 	LDI x, 0x09
272 	OUT TCCR0B, x
273 	OUT TIMSK0, one ; enable tim0_ovf
274 	OUT TIFR0, one ; TODO: why? (writing 1 clears a pending TOV0 flag)
275 	SEI
276 #undef one
; prime the pipeline by running the sample routine once directly.
; NOTE(review): sample ends in RETI, and we arrive here by RJMP with an
; empty stack -- RETI will pop an undefined return address. Confirm this
; is intentional/safe on the target part.
277 	RJMP sample
278
279 loop: ; idle forever; all work happens in the TIM0_OVF interrupt
280 	SLEEP ; wait for interrupt
281 	RJMP loop
282
;-----------------------------------------------------------------------
; sample: TIM0_OVF interrupt handler -- synthesizes one output sample.
; Outputs the PREVIOUSLY computed sample first (so output timing does not
; depend on this routine's variable runtime), then mixes four voices into
; acc from the 32-bit counter i3:i2:i1:i0, and increments the counter.
; Clobbers t, x, _, n, s and flags.
;-----------------------------------------------------------------------
283 sample:
284 ; potential TODO: softcounter in r25 to only update duty cycle every n iterations
285 ; potential TODO: save/restore status register (SREG=0x3f) (only if something in mainloop)
286
287 	OUT OCR0AL, acc ; start by outputting a sample, because routine has variable runtime
288 	SBI PORTB, 2 ; to measure runtime
289
; n = (i2 << 2) | (i1 >> 6)
290 	MOV n, i2
291 	LSL n
292 	LSL n
293 #define tmp _
294 	MOV tmp, i1
295 	SWAP tmp
296 	ANDI tmp, 0x0f
297 	LSR tmp
298 	LSR tmp
299 	OR n, tmp
300 #undef tmp
; s = low byte of ((i3:i2) >> 1) = ((i3 & 1) << 7) | (i2 >> 1)
301 	MOV s, i3
302 	LSR s
303 	ROR s
304 	ANDI s, 0x80
305 #define tmp _
306 	MOV tmp, i2
307 	LSR tmp
308 	OR s, tmp
309 #undef tmp
310
311 ; voice 1:
312 	MOV t, n
313 	RCALL g
314 	SWAP t
315 	ANDI t, 1 ; keep bit 4 of g()'s result: 1-bit contribution
316 	MOV acc, t
317
318 ; voice 2:
319 #define tmp _
320 	MOV tmp, i2
321 	LSL tmp
322 	LSL tmp
323 	LSL tmp
324 	MOV t, i1
325 	SWAP t
326 	ANDI t, 0xf
327 	LSR t
328 	OR t, tmp ; t = (i2 << 3) | (i1 >> 5)
329 #undef tmp
330 	EOR t, n
331 	RCALL g
332 	LSR t
333 	LSR t
334 	ANDI t, 3 ; 2-bit amplitude from g()'s result
335 	AND t, s ; masked by s
336 	ADD acc, t
337
338 ; voice 3:
; Mh:Ml = low 16 bits of ((i3:i2:i1) >> 3)
339 	MOV Ml, i2
340 	SWAP Ml
341 	ANDI Ml, 0xf0
342 	LSL Ml
343 #define tmp _
344 	MOV tmp, i1
345 	LSR tmp
346 	LSR tmp
347 	LSR tmp
348 	OR Ml, tmp ; Ml = (i2 << 5) | (i1 >> 3)
349 #undef tmp
350 	MOV Mh, i3
351 	SWAP Mh
352 	ANDI Mh, 0xf0
353 	LSL Mh
354 #define tmp _
355 	MOV tmp, i2
356 	LSR tmp
357 	LSR tmp
358 	LSR tmp
359 	OR Mh, tmp ; Mh = (i3 << 5) | (i2 >> 3)
360 #undef tmp
361 	RCALL mod3
362 	ADD t, n ; phase = mod3 result + n
363 	RCALL g
364 	LSR t
365 	LSR t
366 	ANDI t, 3
; build a mask from x = s + 1 via a shift/add chain, then gate the voice.
; NOTE(review): the chain looks like a fixed-fraction scaling of x used
; as an amplitude mask -- exact intent not derivable from this file.
367 	MOV x, s
368 	INC x
369 #define tmp _
370 	MOV tmp, x
371 	LSR tmp
372 	LSR tmp
373 	ADD tmp, x
374 	ROR tmp
375 	LSR tmp
376 	ADD tmp, x
377 	ROR tmp
378 	LSR tmp
379 	ADD tmp, x
380 	ROR tmp
381 	LSR tmp
382 	AND t, tmp
383 #undef tmp
384 	ADD acc, t
385
386 ; voice 4:
; Mh:Ml = low 16 bits of ((i3:i2:i1) >> 2)
387 	MOV Ml, i2
388 	SWAP Ml
389 	ANDI Ml, 0xf0
390 	LSL Ml
391 	LSL Ml
392 #define tmp _
393 	MOV tmp, i1
394 	LSR tmp
395 	LSR tmp
396 	OR Ml, tmp ; Ml = (i2 << 6) | (i1 >> 2)
397 #undef tmp
398 	MOV Mh, i3
399 	SWAP Mh
400 	ANDI Mh, 0xf0
401 	LSL Mh
402 	LSL Mh
403 #define tmp _
404 	MOV tmp, i2
405 	LSR tmp
406 	LSR tmp
407 	OR Mh, tmp ; Mh = (i3 << 6) | (i2 >> 2)
408 #undef tmp
409 	RCALL mod3
410 	SUB t, n
411 	NEG t ; t = n - mod3(...)
412 	SUBI t, -8 ; ... + 8
413 	RCALL g
414 	LSR t
415 	ANDI t, 3
; mask from s + 1 (different shift/add chain than voice 3)
416 	INC s
417 #define tmp _
418 	MOV tmp, s
419 	LSR tmp
420 	ADD tmp, s
421 	ROR tmp
422 	LSR tmp
423 	LSR tmp
424 	ADD tmp, s
425 	ROR tmp
426 	ADD tmp, s
427 	ROR tmp
428 	LSR tmp
429 	LSR tmp
430 	AND t, tmp
431 #undef tmp
432 	ADD acc, t
433
434 	SWAP acc ; acc<<4, to be passed to OCR0AL (acc <= 10, so high nibble is 0)
435
; 32-bit increment of i3:i2:i1:i0 (SUBI/SBCI with -1; AVR has no ADDI,
; and the inverted borrow chain propagates the +1 correctly)
436 	SUBI i0, -1
437 	SBCI i1, -1
438 	SBCI i2, -1
439 	SBCI i3, -1
440
441 	CBI PORTB, 2 ; end runtime measurement
442 ;TODO: to reduce jitter: clear pending tim0_ovf (TIFR0[TOV0] <- 1) ?
443 	RETI ; reenables interrupts
; (gitweb page footer: Imprint / Impressum)