/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE 32
#endif

#define __BITS4 (LONG_TYPE_SIZE / 4)
#define __ll_B (1L << (LONG_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
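
/* For example, with LONG_TYPE_SIZE == 32 the definitions above give
   __BITS4 == 8, __ll_B == 0x10000, __ll_lowpart (0xdeadbeef) == 0xbeef
   and __ll_highpart (0xdeadbeef) == 0xdead: each word is split into
   two half-words for the schoolbook multiply and divide macros below.  */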

/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If the macro in addition requires the most significant bit of
   DENOMINATOR to be 1, the pre-processor symbol UDIV_NEEDS_NORMALIZATION
   is defined to 1.

   4) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   5) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   6) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Underflow (i.e. borrow out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  A brief usage sketch follows this comment.  */

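/* A minimal usage sketch (illustrative only; the function and its names
   are not part of this file, and a 32-bit LONG_TYPE_SIZE is assumed):
   a 64-bit multiply-accumulate built from the two-word macros.  */
#if 0
static UDItype
umac32 (USItype u, USItype v, USItype acch, USItype accl)
{
  USItype p1, p0, s1, s0;
  umul_ppmm (p1, p0, u, v);			/* p1:p0 = u * v */
  add_ssaaaa (s1, s0, p1, p0, acch, accl);	/* s1:s0 = p1:p0 + acch:accl */
  return ((UDItype) s1 << 32) | s0;		/* carry out of s1 is lost */
}
#endif
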
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, i960, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

#if defined (__a29k__) || defined (___AM29K__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
	addc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
	subc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("multiplu %0,%1,%2" \
	     : "=r" ((USItype)(xl)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
    __asm__ ("multmu %0,%1,%2" \
	     : "=r" ((USItype)(xh)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4" \
	   : "=r" ((USItype)(q)), \
	     "=q" ((USItype)(r)) \
	   : "1" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "r" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("clz %0,%1" \
	   : "=r" ((USItype)(count)) \
	   : "r" ((USItype)(x)))
#endif /* __a29k__ */

#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1,%4,%5
	adc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1,%4,%5
	sbc %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#endif /* __arm__ */

#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
	addx %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
	subx %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1" \
	   : "=g" ((USItype)(ph)), \
	     "=r" ((USItype)(pl)) \
	   : "%0" ((USItype)(m0)), \
	     "g" ((USItype)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1" \
	   : "=g" ((USItype)(q)), \
	     "=r" ((USItype)(r)) \
	   : "1" ((USItype)(nh)), \
	     "0" ((USItype)(nl)), \
	     "g" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0" \
	   : "=g" (count) \
	   : "g" ((USItype)(x)), \
	     "0" ((USItype)0))
#endif /* __gmicro__ */

#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
	addc %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%rM" ((USItype)(ah)), \
	     "rM" ((USItype)(bh)), \
	     "%rM" ((USItype)(al)), \
	     "rM" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %4,%5,%1
	subb %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "rM" ((USItype)(ah)), \
	     "rM" ((USItype)(bh)), \
	     "rM" ((USItype)(al)), \
	     "rM" ((USItype)(bl)))
#if defined (_PA_RISC1_1)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    union \
      { \
	UDItype __f; \
	struct {USItype __w1, __w0;} __w1w0; \
      } __t; \
    __asm__ ("xmpyu %1,%2,%0" \
	     : "=x" (__t.__f) \
	     : "x" ((USItype)(u)), \
	       "x" ((USItype)(v))); \
    (w1) = __t.__w1w0.__w1; \
    (w0) = __t.__w1w0.__w0; \
  } while (0)
#define UMUL_TIME 8
#else
#define UMUL_TIME 30
#endif
#define UDIV_TIME 40
#endif /* __hppa */

#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
	adcl %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
	sbbl %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
	   : "=a" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "%0" ((USItype)(u)), \
	     "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
	   : "=a" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "rm" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("bsrl %1,%0" \
	     : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */

#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	DItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fiadd.dd %1,%2,%0" \
	     : "=f" (__s.__ll) \
	     : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
	DItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fisub.dd %1,%2,%0" \
	     : "=f" (__s.__ll) \
	     : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#endif
#endif /* __i860__ */

#if defined (___IBMR2__) /* IBM RS6000 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a%I5 %1,%4,%5
	ae %0,%2,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sf%I4 %1,%5,%4
	sfe %0,%3,%2" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "rI" ((USItype)(al)), \
	     "r" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mul %0,%2,%3" \
	     : "=r" ((USItype)(xh)), \
	       "=q" ((USItype)(xl)) \
	     : "r" (__m0), \
	       "r" (__m1)); \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define smul_ppmm(xh, xl, m0, m1) \
  __asm__ ("mul %0,%2,%3" \
	   : "=r" ((USItype)(xh)), \
	     "=q" ((USItype)(xl)) \
	   : "r" ((USItype)(m0)), \
	     "r" ((USItype)(m1)))
#define UMUL_TIME 8
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4" \
	   : "=r" ((USItype)(q)), "=q" ((USItype)(r)) \
	   : "r" ((USItype)(nh)), "1" ((USItype)(nl)), "r" ((USItype)(d)))
#define UDIV_TIME 40
#define UDIV_NEEDS_NORMALIZATION 1
#define count_leading_zeros(count, x) \
  __asm__ ("cntlz %0,%1" \
	   : "=r" ((USItype)(count)) \
	   : "r" ((USItype)(x)))
#endif /* ___IBMR2__ */

#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
	addx%.l %3,%0" \
	   : "=d" ((USItype)(sh)), \
	     "=&d" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "d" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
	subx%.l %3,%0" \
	   : "=d" ((USItype)(sh)), \
	     "=&d" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "d" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#if defined (__mc68020__) || defined (__NeXT__) || defined (mc68020)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0" \
	   : "=d" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "%0" ((USItype)(u)), \
	     "dmi" ((USItype)(v)))
#define UMUL_TIME 45
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0" \
	   : "=d" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "dmi" ((USItype)(d)))
#define UDIV_TIME 90
#define sdiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divs%.l %4,%1:%0" \
	   : "=d" ((USItype)(q)), \
	     "=d" ((USItype)(r)) \
	   : "0" ((USItype)(n0)), \
	     "1" ((USItype)(n1)), \
	     "dmi" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0" \
	   : "=d" ((USItype)(count)) \
	   : "od" ((USItype)(x)), "n" (0))
#else /* not mc68020 */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
	movel	%2,d0
	movel	%3,d1
	movel	d0,d2
	swap	d0
	movel	d1,d3
	swap	d1
	movew	d2,d4
	mulu	d3,d4
	mulu	d1,d2
	mulu	d0,d3
	mulu	d0,d1
	movel	d4,d0
	eorw	d0,d0
	swap	d0
	addl	d0,d2
	addl	d3,d2
	jcc	1f
	addl	#65536,d1
1:	swap	d2
	moveq	#0,d0
	movew	d2,d0
	movew	d4,d2
	movel	d2,%1
	addl	d1,d0
	movel	d0,%0" \
	   : "=g" ((USItype)(xh)), \
	     "=g" ((USItype)(xl)) \
	   : "g" ((USItype)(a)), \
	     "g" ((USItype)(b)) \
	   : "d0", "d1", "d2", "d3", "d4")
#define UMUL_TIME 100
#define UDIV_TIME 400
#endif /* not mc68020 */
#endif /* mc68000 */

#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
	addu.ci %0,%r2,%r3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%rJ" ((USItype)(ah)), \
	     "rJ" ((USItype)(bh)), \
	     "%rJ" ((USItype)(al)), \
	     "rJ" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
	subu.ci %0,%r2,%r3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "rJ" ((USItype)(ah)), \
	     "rJ" ((USItype)(bh)), \
	     "rJ" ((USItype)(al)), \
	     "rJ" ((USItype)(bl)))
#define UMUL_TIME 17
#define UDIV_TIME 150
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("ff1 %0,%1" \
	     : "=r" (__cbtmp) \
	     : "r" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#if defined (__mc88110__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu.d	r10,%2,%3
	or	%0,r10,0
	or	%1,r11,0" \
	   : "=r" (w1), \
	     "=r" (w0) \
	   : "r" ((USItype)(u)), \
	     "r" ((USItype)(v)) \
	   : "r10", "r11")
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("or	r10,%2,0
	or	r11,%3,0
	divu.d	r10,r10,%4
	mulu	%1,%4,r11
	subu	%1,%3,%1
	or	%0,r11,0" \
	   : "=r" (q), \
	     "=&r" (r) \
	   : "r" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "r" ((USItype)(d)) \
	   : "r10", "r11")
#endif /* __mc88110__ */
#endif /* __m88000__ */

#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3
	mflo %0
	mfhi %1" \
	   : "=d" ((USItype)(w0)), \
	     "=d" ((USItype)(w1)) \
	   : "d" ((USItype)(u)), \
	     "d" ((USItype)(v)))
#define UMUL_TIME 5
#define UDIV_TIME 100
#endif /* __mips__ */

#if defined (__ns32000__)
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("meid %2,%0" \
	     : "=g" (__w) \
	     : "%0" ((USItype)(u)), \
	       "g" ((USItype)(v))); \
    __w; })
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("movd %2,r0
	movd %3,r1
	deid %4,r0
	movd r1,%0
	movd r0,%1" \
	   : "=g" ((USItype)(q)), \
	     "=g" ((USItype)(r)) \
	   : "g" ((USItype)(n0)), \
	     "g" ((USItype)(n1)), \
	     "g" ((USItype)(d)) \
	   : "r0", "r1")
#endif /* __ns32000__ */

#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
	addwc %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
	subwb %3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
/* This insn doesn't work on ancient pyramids.  */
#define umul_ppmm(w1, w0, u, v) \
  ({union { \
	UDItype __ll; \
	struct {USItype __h, __l;} __i; \
     } __xx; \
   __xx.__i.__l = u; \
   __asm__ ("uemul %3,%0" \
	    : "=r" (__xx.__i.__h), \
	      "=r" (__xx.__i.__l) \
	    : "1" (__xx.__i.__l), \
	      "g" ((UDItype)(v))); \
   (w1) = __xx.__i.__h; \
   (w0) = __xx.__i.__l;})
#endif /* __pyr__ */

#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
	ae %0,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "r" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
	se %0,%3" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "r" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "r" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ( \
       "s	r2,r2
	mts	r10,%2
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	m	r2,%3
	cas	%0,r2,r0
	mfs	r10,%1" \
	     : "=r" ((USItype)(ph)), \
	       "=r" ((USItype)(pl)) \
	     : "%r" (__m0), \
	       "r" (__m1) \
	     : "r2"); \
    (ph) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
	       : "=r" ((USItype)(count)) \
	       : "r" ((USItype)(x) >> 16)); \
    else \
      { \
	__asm__ ("clz %0,%1" \
		 : "=r" ((USItype)(count)) \
		 : "r" ((USItype)(x))); \
	(count) += 16; \
      } \
  } while (0)
#endif /* __ibm032__ */

#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %4,%5,%1
	addx %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "%r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "%r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)) \
	   __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %4,%5,%1
	subx %2,%3,%0" \
	   : "=r" ((USItype)(sh)), \
	     "=&r" ((USItype)(sl)) \
	   : "r" ((USItype)(ah)), \
	     "rI" ((USItype)(bh)), \
	     "r" ((USItype)(al)), \
	     "rI" ((USItype)(bl)) \
	   __CLOBBER_CC)
#if defined (__sparcv8__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "r" ((USItype)(u)), \
	     "r" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
	   : "=&r" ((USItype)(q)), \
	     "=&r" ((USItype)(r)) \
	   : "r" ((USItype)(n1)), \
	     "r" ((USItype)(n0)), \
	     "r" ((USItype)(d)))
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
	sra	%3,31,%%g2	! Don't move this insn
	and	%2,%%g2,%%g2	! Don't move this insn
	andcc	%%g0,0,%%g1	! Don't move this insn
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,%3,%%g1
	mulscc	%%g1,0,%%g1
	add	%%g1,%%g2,%0
	rd	%%y,%1" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "%rI" ((USItype)(u)), \
	     "r" ((USItype)(v)) \
	   : "%g1", "%g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39		/* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
	mov	32,%%g1
	subcc	%1,%2,%%g0
1:	bcs	5f
	addxcc	%0,%0,%0	! shift n1n0 and a q-bit in lsb
	sub	%1,%2,%1	! this kills msb of n
	addx	%1,%1,%1	! so this can't give carry
	subcc	%%g1,1,%%g1
2:	bne	1b
	subcc	%1,%2,%%g0
	bcs	3f
	addxcc	%0,%0,%0	! shift n1n0 and a q-bit in lsb
	b	3f
	sub	%1,%2,%1	! this kills msb of n
4:	sub	%1,%2,%1
5:	addxcc	%1,%1,%1
	bcc	2b
	subcc	%%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
	bne	4b
	addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb
	sub	%1,%2,%1
3:	xnor	%0,0,%0
	! End of inline udiv_qrnnd" \
	   : "=&r" ((USItype)(q)), \
	     "=&r" ((USItype)(r)) \
	   : "r" ((USItype)(d)), \
	     "1" ((USItype)(n1)), \
	     "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations. */
#endif /* __sparcv8__ */
#endif /* __sparc__ */

#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
	adwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
	sbwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union { \
	UDItype __ll; \
	struct {USItype __l, __h;} __i; \
      } __xx; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
	     : "=r" (__xx.__ll) \
	     : "g" (__m0), \
	       "g" (__m1)); \
    (xh) = __xx.__i.__h; \
    (xl) = __xx.__i.__l; \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

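/* In the generic versions, carry out of the low-word addition shows up
   as unsigned wraparound: __x = (al) + (bl) wraps, i.e. __x < (al),
   exactly when the true sum does not fit in one word.  The subtraction
   macro uses the mirror test __x > (al) to detect a borrow.  */
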
#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    USItype __x0, __x1, __x2, __x3; \
    USItype __ul, __vl, __uh, __vh; \
\
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
\
    __x0 = (USItype) __ul * __vl; \
    __x1 = (USItype) __ul * __vh; \
    __x2 = (USItype) __uh * __vl; \
    __x3 = (USItype) __uh * __vh; \
\
    __x1 += __ll_highpart (__x0);	/* this can't give carry */ \
    __x1 += __x2;			/* but this indeed can */ \
    if (__x1 < __x2)			/* did we get it? */ \
      __x3 += __ll_B;			/* yes, add it in the proper pos. */ \
\
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif
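
/* Illustrative check of the decomposition above (hypothetical helper,
   not part of this file; assumes UDItype is a genuine 64-bit type so
   the result can be compared against a native multiply).  */
#if 0
static int
umul_ppmm_check (USItype u, USItype v)
{
  USItype w1, w0;
  UDItype ref = (UDItype) u * v;
  umul_ppmm (w1, w0, u, v);	/* w1:w0 = u * v from four half-products */
  return w1 == (USItype) (ref >> 32) && w0 == (USItype) ref;
}
#endif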

#if !defined (__umulsidi3)
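/* DIunion is not defined here; it is expected from the file that
   includes this header (libgcc2.c defines it as a union of DItype
   and a high/low word struct).  */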
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

/* Define this unconditionally, so it can be used for debugging.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    USItype __d1, __d0, __q1, __q0; \
    USItype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
\
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (USItype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
\
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (USItype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
\
    (q) = (USItype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)

/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_using_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_using_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
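
/* Usage sketch for the normalization protocol (illustrative only;
   `udiv_64_by_32' and its names are not part of this file, and a
   32-bit USItype is assumed).  When UDIV_NEEDS_NORMALIZATION is 1,
   the divisor must be shifted until its msb is set, the two-word
   numerator n1:n0 (with n1 < d) shifted by the same amount, and the
   remainder shifted back.  */
#if 0
static USItype
udiv_64_by_32 (USItype n1, USItype n0, USItype d, USItype *rem)
{
  USItype __q, __r;
#if UDIV_NEEDS_NORMALIZATION
  USItype __norm;
  count_leading_zeros (__norm, d);	/* d must be nonzero */
  if (__norm != 0)
    {
      d <<= __norm;
      n1 = (n1 << __norm) | (n0 >> (32 - __norm));	/* exact since n1 < d */
      n0 <<= __norm;
    }
  udiv_qrnnd (__q, __r, n1, n0, d);
  *rem = __r >> __norm;
#else
  udiv_qrnnd (__q, __r, n1, n0, d);
  *rem = __r;
#endif
  return __q;
}
#endif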

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    USItype __xr = (x); \
    USItype __a; \
\
    if (LONG_TYPE_SIZE <= 32) \
      { \
	__a = __xr < (1<<2*__BITS4) \
	  ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
	  : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
	for (__a = LONG_TYPE_SIZE - 8; __a > 0; __a -= 8) \
	  if (((__xr >> __a) & 0xff) != 0) \
	    break; \
      } \
\
    (count) = LONG_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#endif

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif