/* macro68.h - 68000 instruction emulation macros
 * (sc68 for developers, version 2.2.1)
 *
 * Copyright (C) 1998-2001 Ben(jamin) Gerard
 */
18 #ifndef _MACRO68_H_
19 #define _MACRO68_H_
20 
21 #include "emu68/srdef68.h"
22 #include "emu68/excep68.h"
23 
24 #ifdef __cplusplus
25 extern "C" {
26 #endif
27 
28 #ifndef EMU68CYCLE
29 # define ADDCYCLE(N)
30 # define SETCYCLE(N)
31 #else
32 # define ADDCYCLE(N) reg68.cycle += (N)
33 # define SETCYCLE(N) reg68.cycle = (N)
34 #endif
35 
/* Enter exception processing: push PC then SR, force supervisor mode
 * with interrupt priority level LVL, and load PC from the 32-bit
 * vector at address VECTOR.
 * Deliberately a brace block (not do/while): some users (e.g.
 * BUSERROR) expand it without a trailing semicolon. */
#define EXCEPTION(VECTOR,LVL) \
{ \
  pushl(reg68.pc); pushw(reg68.sr); \
  reg68.sr &= 0x70FF; /* clear trace and IPL bits */ \
  reg68.sr |= (0x2000+((LVL)<<SR_IPL_BIT)); /* set S and new IPL */ \
  reg68.pc = read_L(VECTOR); \
}

/* Illegal instruction: log the faulting PC, then raise the ILLEGAL
 * exception. */
#define ILLEGAL \
{\
  EMU68error_add("Illegal pc:%06x",reg68.pc); \
  EXCEPTION(ILLEGAL_VECTOR,ILLEGAL_LVL); \
}

/* Bus error: log PC, faulting address and access direction
 * ('W' for write, 'R' for read), then raise the bus error exception.
 * Macro arguments are parenthesized at their point of use. */
#define BUSERROR(ADDR,MODE) \
{\
  EMU68error_add("bus error pc:%06x addr:%06x (%c)",\
                 reg68.pc,(ADDR),(MODE)?'W':'R');\
  EXCEPTION(BUSERROR_VECTOR,BUSERROR_LVL) \
}

/* Line-A (1010) emulator trap. */
#define LINEA EXCEPTION(LINEA_VECTOR,LINEA_LVL)

/* Line-F (1111) emulator trap. */
#define LINEF EXCEPTION(LINEF_VECTOR,LINEF_LVL)

/* TRAPV: raise the TRAPV exception only when the overflow flag is set. */
#define TRAPV if(reg68.sr&SR_V) EXCEPTION(TRAPV_VECTOR,TRAPV_LVL)

/* TRAP #n: raise the trap exception for trap number TRAP_N. */
#define TRAP(TRAP_N) EXCEPTION(TRAP_VECTOR(TRAP_N),TRAP_LVL)

/* CHK exception entry. */
#define CHK EXCEPTION(CHK_VECTOR,CHK_LVL)

/* CHK.W: raise CHK when CHK_B lies outside the range [0, CHK_A]. */
#define CHKW(CHK_A,CHK_B) if((CHK_B)<0 || (CHK_B)>(CHK_A)){ CHK; }

/* NOP: no operation. */
#define NOP

/* RESET instruction: delegate to the emulator's reset routine. */
#define RESET EMU68_reset()

/* STOP: load SR from the immediate word and flag stopped status.
 * NOTE(review): two statements without do/while(0) — only safe when
 * expanded as a complete statement, never as an if-body. */
#define STOP reg68.sr = (u16)get_nextw(); reg68.status = 1

/* RTS: pop the return address into PC. */
#define RTS reg68.pc = popl()

/* RTE: pop SR first (it was pushed last by EXCEPTION), then PC. */
#define RTE reg68.sr = popw(); RTS

/* RTR: restore only the CCR byte (system byte of SR kept), then PC. */
#define RTR reg68.sr = (reg68.sr&0xFF00) | (u8)popw(); RTS

/* NBCD.B stub: currently a plain copy, NOT a BCD negate.
 * NOTE(review): a real 68k NBCD computes 0 - src - X in packed BCD and
 * updates the flags; confirm callers tolerate this simplification. */
#define NBCDB(NBCD_S,NBCD_A) (NBCD_S)=(NBCD_A)

/* EXG: exchange two 32-bit register values.
 * Uses a temporary instead of the previous XOR-swap trick: the XOR
 * variant zeroed the register whenever both operands were the same
 * lvalue (EXG Dn,Dn is a no-op on a real 68000). do/while(0) makes the
 * macro behave as a single statement. */
#define EXG(A,B) do { u32 exg68_tmp = (A); (A) = (B); (B) = exg68_tmp; } while(0)

/* EXT.W: sign-extend the low byte of D into its low word; the high
 * word of D is preserved. */
#define EXTW(D) (D) = ((D)&0xFFFF0000) | ((u16)(s32)(s8)(D))

/* EXT.L: sign-extend the low word of D to the full 32 bits. */
#define EXTL(D) (D) = (s32)(s16)(D)

/* TAS: set flags from the operand (via TSTB), then set bit 31 — the
 * sign bit of the left-justified byte operand. The bus-locking
 * atomicity of the real instruction is not modelled. */
#define TAS(TAS_A) { TSTB(TAS_A,TAS_A); (TAS_A) |= 0x80000000; }

/* CLR: clear the destination, set Z, clear N/V/C (X unchanged).
 * The self-assignment of CLR_A mirrors the 68000's read-before-write
 * access of the destination EA — presumably kept for memory-access
 * side effects; TODO confirm. */
#define CLR(CLR_S,CLR_A) \
{\
  (CLR_A) = (CLR_A); \
  reg68.sr =(reg68.sr&~(SR_N|SR_V|SR_C)) | SR_Z;\
  CLR_S = 0;\
}

/* All size variants share the same implementation. */
#define CLRB(A,B) CLR(A,B)
#define CLRW(A,B) CLR(A,B)
#define CLRL(A,B) CLR(A,B)

/* LINK An,#disp: push An, copy SP (a7) into An, then add the
 * immediate displacement (usually negative) to SP. */
#define LINK(R_LNK) \
  pushl(reg68.a[R_LNK]); \
  reg68.a[R_LNK] = reg68.a[7]; \
  reg68.a[7] += get_nextw()

/* UNLK An: restore SP from An, then pop the saved An. */
#define UNLK(R_LNK) \
  reg68.a[7]=reg68.a[R_LNK]; \
  reg68.a[R_LNK]=popl()

/* SWAP: exchange the two 16-bit halves of the register, then set N and
 * Z from the result; V and C are cleared, X is untouched. */
#define SWAP(SWP_A) \
{ \
  (SWP_A) = ((u32)(SWP_A)>>16) | ((SWP_A)<<16); \
  reg68.sr = (reg68.sr&~(SR_V|SR_C|SR_Z|SR_N)) | \
             ((!(SWP_A))<<SR_Z_BIT) | \
             (((s32)(SWP_A)>>31)&SR_N); \
}

/* Older bit-operation variants, disabled; kept for reference only. */
#if 0

#define BTST(V,BIT) \
  reg68.sr = (reg68.sr&(~SR_Z)) | ((((V)&(1<<(BIT)))==0)<<SR_Z_BIT)

#define BSET(V,BIT) BTST(V,BIT); (V) |= (1<<(BIT));

#define BCLR(V,BIT) BTST(V,BIT); (V) &= ~(1<<(BIT));

#define BCHG(V,BIT) BTST(V,BIT); (V) ^= (1<<(BIT));

#endif

/* BTST: Z = complement of the tested bit; no other flag touched. */
#define BTST(V,BIT) \
  reg68.sr = (reg68.sr&(~SR_Z)) | (((((V)>>(BIT))&1)^1)<<SR_Z_BIT)

/* BSET: Z = inverted old bit state, then set the bit. */
#define BSET(V,BIT) \
if( (V)&(1<<(BIT)) ) { reg68.sr &= ~SR_Z; }\
else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }

/* BCLR: Z = inverted old bit state, then clear the bit. */
#define BCLR(V,BIT) \
if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
else { reg68.sr |= SR_Z; }

/* BCHG: Z = inverted old bit state, then toggle the bit. */
#define BCHG(V,BIT) \
if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }

/* MOVE/TST condition codes: N and Z from the 32-bit value, V and C
 * cleared, X and the system byte preserved. */
#define MOVE(MOV_A) reg68.sr = (reg68.sr&(0xFF00 | SR_X)) \
  | (((MOV_A)==0)<<SR_Z_BIT) | (((s32)(MOV_A)>>31)&SR_N);
#define TST(TST_V) MOVE(TST_V)
/* Sized TST: copy source into destination, then set flags from it. */
#define TSTB(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTW(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTL(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }

/* Multiply / divide: result and all flag handling delegated to the
 * emu68 arithmetic helpers. */
#define MULSW(MUL_S, MUL_A, MUL_B) MUL_S = muls68(MUL_A, MUL_B)
#define MULUW(MUL_S, MUL_A, MUL_B) MUL_S = mulu68(MUL_A, MUL_B)
#define DIVSW(DIV_S, DIV_A, DIV_B) DIV_S = divs68(DIV_A, DIV_B)
#define DIVUW(DIV_S, DIV_A, DIV_B) DIV_S = divu68(DIV_A, DIV_B)

/* Logical operations: flags computed by the emu68 helpers. The sized
 * variants are identical because operand size is handled elsewhere —
 * operands appear to live left-justified in 32 bits (see the
 * 0xFF000000/0xFFFF0000 masks in the shift macros below). */
#define AND(AND_S, AND_A, AND_B) AND_S = and68(AND_A, AND_B)
#define ANDB(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)
#define ANDW(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)
#define ANDL(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)

#define ORR(ORR_S, ORR_A, ORR_B) ORR_S = orr68(ORR_A, ORR_B)
#define ORB(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)
#define ORW(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)
#define ORL(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)

#define EOR(EOR_S, EOR_A, EOR_B) EOR_S = eor68(EOR_A, EOR_B)
#define EORB(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)
#define EORW(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)
#define EORL(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)

#define NOT(NOT_S,NOT_A) NOT_S = not68(NOT_A)
#define NOTB(A,B) NOT(A,B)
#define NOTW(A,B) NOT(A,B)
#define NOTL(A,B) NOT(A,B)

/* Add / subtract / compare via emu68 helpers (which also set flags).
 * Note the operand order: SUB and CMP compute B - A. */
#define ADD(ADD_S,ADD_A,ADD_B,ADD_X) ADD_S=add68(ADD_A,ADD_B,ADD_X)
#define SUB(SUB_S,SUB_A,SUB_B,SUB_X) SUB_S=sub68(SUB_B,SUB_A,SUB_X)
#define CMP(SUB_A,SUB_B) sub68(SUB_B,SUB_A,0)

#define ADDB(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDW(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDL(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)

/* ADDX: inject the X flag shifted to the carry-in position of the
 * left-justified operand (byte/word need the extra shift). */
#define ADDXB(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define ADDXW(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define ADDXL(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)>>SR_X_BIT )

/* ADDA: address-register add — no flag change. The word form shifts
 * the high-word operand down arithmetically (sign-extending). */
#define ADDA(ADD_S, ADD_A, ADD_B) (ADD_S) = (ADD_A) + (ADD_B)
#define ADDAW(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, (ADD_A)>>16, ADD_B)
#define ADDAL(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, ADD_A, ADD_B)

#define SUBB(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBW(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBL(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)

#define SUBXB(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define SUBXW(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define SUBXL(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)>>SR_X_BIT)

/* SUBA: address-register subtract — no flag change. */
#define SUBA(SUB_S, SUB_A, SUB_B) (SUB_S) = (SUB_B) - (SUB_A)
#define SUBAW(SUB_S, SUB_A, SUB_B) \
  {\
    s32 ZOB = (SUB_A)>>16;\
    SUBA(SUB_S, ZOB, SUB_B);\
  }
#define SUBAL(SUB_S, SUB_A, SUB_B) SUBA(SUB_S, SUB_A, SUB_B)

/* CMP: flags only, result discarded. */
#define CMPB(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPW(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPA(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPAW(CMP_A, CMP_B) \
  {\
    s32 ZOB = (CMP_A)>>16;\
    CMPA( ZOB, CMP_B);\
  }
#define CMPAL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)

/* NEG = 0 - operand; NEGX additionally subtracts the X flag. */
#define NEGB(NEG_S,NEG_A) SUBB(NEG_S,NEG_A,0)
#define NEGW(NEG_S,NEG_A) SUBW(NEG_S,NEG_A,0)
#define NEGL(NEG_S,NEG_A) SUBL(NEG_S,NEG_A,0)

#define NEGXB(NEG_S,NEG_A) SUBXB(NEG_S,NEG_A,0)
#define NEGXW(NEG_S,NEG_A) SUBXW(NEG_S,NEG_A,0)
#define NEGXL(NEG_S,NEG_A) SUBXL(NEG_S,NEG_A,0)

/* Logical shift right.
 * Operands sit left-justified in 32 bits: LSR_MSK keeps the
 * significant bits, LSR_C marks the bit that falls into C/X on the
 * final shift step.
 * Flags: X,C from the last bit shifted out (only when count != 0);
 * Z,N from the masked result; the rest of the CCR is cleared.
 * NOTE(review): LSR_D is evaluated several times — no side effects. */
#define LSR(LSR_A,LSR_D,LSR_MSK,LSR_C) \
{\
  reg68.sr &= 0xFF00;\
  if((LSR_D)!=0) \
  {\
    ADDCYCLE(2*(LSR_D));\
    (LSR_A) >>= (LSR_D)-1;\
    if((LSR_A)&(LSR_C)) reg68.sr |= SR_X | SR_C;\
    (LSR_A)>>=1;\
  }\
  (LSR_A) &= (LSR_MSK);\
  reg68.sr |= (((LSR_A)==0)<<SR_Z_BIT) | (((s32)(LSR_A)<0)<<SR_N_BIT);\
}

#define LSRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))
#define LSRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))
#define LSRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))

/* NOTE(review): ASR is aliased to the *logical* shift, so the sign
 * bit is not replicated — a real 68000 ASR sign-fills negative
 * operands. Left unchanged; confirm before relying on ASR of
 * negative values. */
#define ASRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))
#define ASRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))
#define ASRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))

/* Logical shift left. Operands are left-justified, so the bit shifted
 * out is always bit 31.
 * Flags: X,C from the last bit shifted out (count != 0 only); Z,N
 * from the masked result; V is never set here.
 * NOTE(review): LSL_D is evaluated several times — no side effects. */
#define LSL(LSL_A,LSL_D,LSL_MSK) \
{\
  reg68.sr &= 0xFF00;\
  if((LSL_D)!=0) \
  {\
    ADDCYCLE(2*(LSL_D));\
    (LSL_A) <<= (LSL_D)-1;\
    if((LSL_A)&0x80000000) reg68.sr |= SR_X | SR_C;\
    (LSL_A)<<=1;\
  }\
  (LSL_A) &= (LSL_MSK);\
  reg68.sr |= (((LSL_A)==0)<<SR_Z_BIT) | (((s32)(LSL_A)<0)<<SR_N_BIT);\
}

#define LSLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)
#define LSLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)
#define LSLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)

/* ASL shares the LSL implementation: the result bits match, but the
 * 68000's overflow-flag computation for ASL is omitted. */
#define ASLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)
#define ASLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)
#define ASLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)

/* Rotate right (without extend). ROR_SZ is the operand width in bits;
 * the value sits left-justified under ROR_MSK.
 * Flags: X preserved; C gets the last bit rotated out (count != 0
 * only); Z,N from the masked result.
 * NOTE(review): ROR_D is both read and modified (masked to the operand
 * size) — it must be a side-effect-free lvalue. */
#define ROR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  reg68.sr &= 0xFF00 | SR_X;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

/* Rotate left; mirror image of ROR (same flag behaviour). */
#define ROL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  reg68.sr &= 0xFF00 | SR_X;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define RORB(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFF000000,8)
#define RORW(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFF0000,16)
#define RORL(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROLB(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFF000000,8)
#define ROLW(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROLL(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFFFFFF,32)

/* Rotate right through the X flag (a ROR_SZ+1-bit rotation).
 * The previous X is captured first and shifted into the vacated
 * position; X and C both receive the last bit rotated out.
 * NOTE(review): ROR_D is modified (masked) and evaluated many times —
 * side-effect-free lvalues only. */
#define ROXR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
  reg68.sr &= 0xFF00;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C | SR_X;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)+1));\
    (ROR_A) |= (ROR_X)<<(32-(ROR_D));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

/* Rotate left through X; mirror of ROXR. */
#define ROXL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
  reg68.sr &= 0xFF00;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C | SR_X ;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)+1));\
    (ROR_A) |= (ROR_X)<<((ROR_D)-1+(32-(ROR_SZ)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define ROXRB(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFF000000,8)
#define ROXRW(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXRL(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROXLB(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFF000000,8)
#define ROXLW(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXLL(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFFFFFF,32)

519 #ifdef __cplusplus
520 }
521 #endif
522 
523 #endif /* #ifndef _MACRO68_H_ */
/* Included headers: srdef68.h provides the Status Register (SR)
 * definitions; excep68.h provides the 68k exception vector
 * definitions. */