Patch to make glibc-2.2.2's stdlib/longlong.h compile with gcc >= 3.x: the obsolete multi-line string literals inside __asm__ statements are rewritten as standard concatenated strings with explicit "\n" terminators (modern gcc rejects literal newlines in strings). Fixes errors like:
2 ./longlong.h:423: error: parse error before '%' token
3 ./longlong.h:423: error: missing terminating " character
4 ./longlong.h:432: error: missing terminating " character
5 See also patches/glibc-2.1.3/glibc-2.1.3-allow-gcc3-longlong.patch
7 ===================================================================
8 --- glibc-2.2.2/stdlib/longlong.h.old 2000-02-11 15:48:58.000000000 -0800
9 +++ glibc-2.2.2/stdlib/longlong.h 2005-04-11 15:36:10.000000000 -0700
12 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
13 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
14 - __asm__ ("add %1,%4,%5
16 + __asm__ ("add %1,%4,%5\n" \
18 : "=r" ((USItype) (sh)), \
19 "=&r" ((USItype) (sl)) \
20 : "%r" ((USItype) (ah)), \
22 "%r" ((USItype) (al)), \
23 "rI" ((USItype) (bl)))
24 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
25 - __asm__ ("sub %1,%4,%5
27 + __asm__ ("sub %1,%4,%5\n" \
29 : "=r" ((USItype) (sh)), \
30 "=&r" ((USItype) (sl)) \
31 : "r" ((USItype) (ah)), \
34 #if defined (__arc__) && W_TYPE_SIZE == 32
35 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
36 - __asm__ ("add.f %1, %4, %5
38 + __asm__ ("add.f %1, %4, %5\n" \
40 : "=r" ((USItype) (sh)), \
41 "=&r" ((USItype) (sl)) \
42 : "%r" ((USItype) (ah)), \
44 "%r" ((USItype) (al)), \
45 "rIJ" ((USItype) (bl)))
46 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
47 - __asm__ ("sub.f %1, %4, %5
49 + __asm__ ("sub.f %1, %4, %5\n" \
51 : "=r" ((USItype) (sh)), \
52 "=&r" ((USItype) (sl)) \
53 : "r" ((USItype) (ah)), \
56 #if defined (__arm__) && W_TYPE_SIZE == 32
57 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
58 - __asm__ ("adds %1, %4, %5
60 + __asm__ ("adds %1, %4, %5\n" \
62 : "=r" ((USItype) (sh)), \
63 "=&r" ((USItype) (sl)) \
64 : "%r" ((USItype) (ah)), \
66 "%r" ((USItype) (al)), \
67 "rI" ((USItype) (bl)))
68 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
69 - __asm__ ("subs %1, %4, %5
71 + __asm__ ("subs %1, %4, %5\n" \
73 : "=r" ((USItype) (sh)), \
74 "=&r" ((USItype) (sl)) \
75 : "r" ((USItype) (ah)), \
77 "rI" ((USItype) (bl)))
78 #define umul_ppmm(xh, xl, a, b) \
79 {register USItype __t0, __t1, __t2; \
80 - __asm__ ("%@ Inlined umul_ppmm
83 - bic %3, %5, %2, lsl #16
84 - bic %4, %6, %0, lsl #16
90 - addcs %0, %0, #65536
91 - adds %1, %1, %3, lsl #16
92 - adc %0, %0, %3, lsr #16" \
93 + __asm__ ("%@ Inlined umul_ppmm\n" \
94 + "mov %2, %5, lsr #16\n" \
95 + "mov %0, %6, lsr #16\n" \
96 + "bic %3, %5, %2, lsl #16\n" \
97 + "bic %4, %6, %0, lsl #16\n" \
98 + "mul %1, %3, %4\n" \
99 + "mul %4, %2, %4\n" \
100 + "mul %3, %0, %3\n" \
101 + "mul %0, %2, %0\n" \
102 + "adds %3, %4, %3\n" \
103 + "addcs %0, %0, #65536\n" \
104 + "adds %1, %1, %3, lsl #16\n" \
105 + "adc %0, %0, %3, lsr #16" \
106 : "=&r" ((USItype) (xh)), \
107 "=r" ((USItype) (xl)), \
108 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
111 #if defined (__gmicro__) && W_TYPE_SIZE == 32
112 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
113 - __asm__ ("add.w %5,%1
115 + __asm__ ("add.w %5,%1\n" \
117 : "=g" ((USItype) (sh)), \
118 "=&g" ((USItype) (sl)) \
119 : "%0" ((USItype) (ah)), \
121 "%1" ((USItype) (al)), \
122 "g" ((USItype) (bl)))
123 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
124 - __asm__ ("sub.w %5,%1
126 + __asm__ ("sub.w %5,%1\n" \
128 : "=g" ((USItype) (sh)), \
129 "=&g" ((USItype) (sl)) \
130 : "0" ((USItype) (ah)), \
133 #if defined (__hppa) && W_TYPE_SIZE == 32
134 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
135 - __asm__ ("add %4,%5,%1
137 + __asm__ ("add %4,%5,%1\n" \
139 : "=r" ((USItype) (sh)), \
140 "=&r" ((USItype) (sl)) \
141 : "%rM" ((USItype) (ah)), \
143 "%rM" ((USItype) (al)), \
144 "rM" ((USItype) (bl)))
145 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
146 - __asm__ ("sub %4,%5,%1
148 + __asm__ ("sub %4,%5,%1\n" \
150 : "=r" ((USItype) (sh)), \
151 "=&r" ((USItype) (sl)) \
152 : "rM" ((USItype) (ah)), \
153 @@ -357,22 +357,22 @@
158 - extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
159 - extru,tr %1,15,16,%1 ; No. Shift down, skip add.
160 - ldo 16(%0),%0 ; Yes. Perform add.
161 - extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
162 - extru,tr %1,23,8,%1 ; No. Shift down, skip add.
163 - ldo 8(%0),%0 ; Yes. Perform add.
164 - extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
165 - extru,tr %1,27,4,%1 ; No. Shift down, skip add.
166 - ldo 4(%0),%0 ; Yes. Perform add.
167 - extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
168 - extru,tr %1,29,2,%1 ; No. Shift down, skip add.
169 - ldo 2(%0),%0 ; Yes. Perform add.
170 - extru %1,30,1,%1 ; Extract bit 1.
171 - sub %0,%1,%0 ; Subtract it.
172 - " : "=r" (count), "=r" (__tmp) : "1" (x)); \
174 + "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
175 + "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
176 + "ldo 16(%0),%0 ; Yes. Perform add.\n" \
177 + "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
178 + "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
179 + "ldo 8(%0),%0 ; Yes. Perform add.\n" \
180 + "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
181 + "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
182 + "ldo 4(%0),%0 ; Yes. Perform add.\n" \
183 + "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
184 + "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
185 + "ldo 2(%0),%0 ; Yes. Perform add.\n" \
186 + "extru %1,30,1,%1 ; Extract bit 1.\n" \
187 + "sub %0,%1,%0 ; Subtract it.\n" \
188 + : "=r" (count), "=r" (__tmp) : "1" (x)); \
194 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
195 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
196 - __asm__ ("addl %5,%1
198 + __asm__ ("addl %5,%1\n" \
200 : "=r" ((USItype) (sh)), \
201 "=&r" ((USItype) (sl)) \
202 : "%0" ((USItype) (ah)), \
204 "%1" ((USItype) (al)), \
205 "g" ((USItype) (bl)))
206 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
207 - __asm__ ("subl %5,%1
209 + __asm__ ("subl %5,%1\n" \
211 : "=r" ((USItype) (sh)), \
212 "=&r" ((USItype) (sl)) \
213 : "0" ((USItype) (ah)), \
215 #if defined (__M32R__) && W_TYPE_SIZE == 32
216 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
217 /* The cmp clears the condition bit. */ \
218 - __asm__ ("cmp %0,%0
221 + __asm__ ("cmp %0,%0\n" \
224 : "=r" ((USItype) (sh)), \
225 "=&r" ((USItype) (sl)) \
226 : "%0" ((USItype) (ah)), \
229 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
230 /* The cmp clears the condition bit. */ \
231 - __asm__ ("cmp %0,%0
234 + __asm__ ("cmp %0,%0\n" \
237 : "=r" ((USItype) (sh)), \
238 "=&r" ((USItype) (sl)) \
239 : "0" ((USItype) (ah)), \
242 #if defined (__mc68000__) && W_TYPE_SIZE == 32
243 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
244 - __asm__ ("add%.l %5,%1
246 + __asm__ ("add%.l %5,%1\n" \
248 : "=d" ((USItype) (sh)), \
249 "=&d" ((USItype) (sl)) \
250 : "%0" ((USItype) (ah)), \
252 "%1" ((USItype) (al)), \
253 "g" ((USItype) (bl)))
254 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
255 - __asm__ ("sub%.l %5,%1
257 + __asm__ ("sub%.l %5,%1\n" \
259 : "=d" ((USItype) (sh)), \
260 "=&d" ((USItype) (sl)) \
261 : "0" ((USItype) (ah)), \
262 @@ -602,32 +602,32 @@
263 #if !defined(__mcf5200__)
264 /* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
265 #define umul_ppmm(xh, xl, a, b) \
266 - __asm__ ("| Inlined umul_ppmm
284 - add%.l %#65536,%/d1
292 + __asm__ ("| Inlined umul_ppmm\n" \
293 + "move%.l %2,%/d0\n" \
294 + "move%.l %3,%/d1\n" \
295 + "move%.l %/d0,%/d2\n" \
297 + "move%.l %/d1,%/d3\n" \
299 + "move%.w %/d2,%/d4\n" \
300 + "mulu %/d3,%/d4\n" \
301 + "mulu %/d1,%/d2\n" \
302 + "mulu %/d0,%/d3\n" \
303 + "mulu %/d0,%/d1\n" \
304 + "move%.l %/d4,%/d0\n" \
305 + "eor%.w %/d0,%/d0\n" \
307 + "add%.l %/d0,%/d2\n" \
308 + "add%.l %/d3,%/d2\n" \
310 + "add%.l %#65536,%/d1\n" \
312 + "moveq %#0,%/d0\n" \
313 + "move%.w %/d2,%/d0\n" \
314 + "move%.w %/d4,%/d2\n" \
315 + "move%.l %/d2,%1\n" \
316 + "add%.l %/d1,%/d0\n" \
317 + "move%.l %/d0,%0" \
318 : "=g" ((USItype) (xh)), \
319 "=g" ((USItype) (xl)) \
320 : "g" ((USItype) (a)), \
323 #if defined (__m88000__) && W_TYPE_SIZE == 32
324 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
325 - __asm__ ("addu.co %1,%r4,%r5
326 - addu.ci %0,%r2,%r3" \
327 + __asm__ ("addu.co %1,%r4,%r5\n" \
328 + "addu.ci %0,%r2,%r3" \
329 : "=r" ((USItype) (sh)), \
330 "=&r" ((USItype) (sl)) \
331 : "%rJ" ((USItype) (ah)), \
333 "%rJ" ((USItype) (al)), \
334 "rJ" ((USItype) (bl)))
335 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
336 - __asm__ ("subu.co %1,%r4,%r5
337 - subu.ci %0,%r2,%r3" \
338 + __asm__ ("subu.co %1,%r4,%r5\n" \
339 + "subu.ci %0,%r2,%r3" \
340 : "=r" ((USItype) (sh)), \
341 "=&r" ((USItype) (sl)) \
342 : "rJ" ((USItype) (ah)), \
345 #if defined (__pyr__) && W_TYPE_SIZE == 32
346 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
347 - __asm__ ("addw %5,%1
349 + __asm__ ("addw %5,%1\n" \
351 : "=r" ((USItype) (sh)), \
352 "=&r" ((USItype) (sl)) \
353 : "%0" ((USItype) (ah)), \
355 "%1" ((USItype) (al)), \
356 "g" ((USItype) (bl)))
357 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
358 - __asm__ ("subw %5,%1
360 + __asm__ ("subw %5,%1\n" \
362 : "=r" ((USItype) (sh)), \
363 "=&r" ((USItype) (sl)) \
364 : "0" ((USItype) (ah)), \
366 ({union {UDItype __ll; \
367 struct {USItype __h, __l;} __i; \
369 - __asm__ ("movw %1,%R0
371 + __asm__ ("movw %1,%R0\n" \
373 : "=&r" (__xx.__ll) \
374 : "g" ((USItype) (u)), \
375 "g" ((USItype) (v))); \
378 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
379 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
382 + __asm__ ("a %1,%5\n" \
384 : "=r" ((USItype) (sh)), \
385 "=&r" ((USItype) (sl)) \
386 : "%0" ((USItype) (ah)), \
388 "%1" ((USItype) (al)), \
389 "r" ((USItype) (bl)))
390 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
393 + __asm__ ("s %1,%5\n" \
395 : "=r" ((USItype) (sh)), \
396 "=&r" ((USItype) (sl)) \
397 : "0" ((USItype) (ah)), \
398 @@ -933,26 +933,26 @@
400 USItype __m0 = (m0), __m1 = (m1); \
442 : "=r" ((USItype) (ph)), \
443 "=r" ((USItype) (pl)) \
446 #if defined (__sh2__) && W_TYPE_SIZE == 32
447 #define umul_ppmm(w1, w0, u, v) \
452 + "dmulu.l %2,%3\n" \
455 : "=r" ((USItype)(w1)), \
456 "=r" ((USItype)(w0)) \
457 : "r" ((USItype)(u)), \
459 #if defined (__sparc__) && !defined(__arch64__) \
460 && !defined(__sparcv9) && W_TYPE_SIZE == 32
461 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
462 - __asm__ ("addcc %r4,%5,%1
464 + __asm__ ("addcc %r4,%5,%1\n" \
466 : "=r" ((USItype) (sh)), \
467 "=&r" ((USItype) (sl)) \
468 : "%rJ" ((USItype) (ah)), \
469 @@ -1006,8 +1006,8 @@
470 "rI" ((USItype) (bl)) \
472 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
473 - __asm__ ("subcc %r4,%5,%1
475 + __asm__ ("subcc %r4,%5,%1\n" \
477 : "=r" ((USItype) (sh)), \
478 "=&r" ((USItype) (sl)) \
479 : "rJ" ((USItype) (ah)), \
480 @@ -1040,45 +1040,45 @@
481 : "r" ((USItype) (u)), \
483 #define udiv_qrnnd(q, r, n1, n0, d) \
484 - __asm__ ("! Inlined udiv_qrnnd
485 - wr %%g0,%2,%%y ! Not a delayed write for sparclite
488 - divscc %%g1,%4,%%g1
489 - divscc %%g1,%4,%%g1
490 - divscc %%g1,%4,%%g1
491 - divscc %%g1,%4,%%g1
492 - divscc %%g1,%4,%%g1
493 - divscc %%g1,%4,%%g1
494 - divscc %%g1,%4,%%g1
495 - divscc %%g1,%4,%%g1
496 - divscc %%g1,%4,%%g1
497 - divscc %%g1,%4,%%g1
498 - divscc %%g1,%4,%%g1
499 - divscc %%g1,%4,%%g1
500 - divscc %%g1,%4,%%g1
501 - divscc %%g1,%4,%%g1
502 - divscc %%g1,%4,%%g1
503 - divscc %%g1,%4,%%g1
504 - divscc %%g1,%4,%%g1
505 - divscc %%g1,%4,%%g1
506 - divscc %%g1,%4,%%g1
507 - divscc %%g1,%4,%%g1
508 - divscc %%g1,%4,%%g1
509 - divscc %%g1,%4,%%g1
510 - divscc %%g1,%4,%%g1
511 - divscc %%g1,%4,%%g1
512 - divscc %%g1,%4,%%g1
513 - divscc %%g1,%4,%%g1
514 - divscc %%g1,%4,%%g1
515 - divscc %%g1,%4,%%g1
516 - divscc %%g1,%4,%%g1
517 - divscc %%g1,%4,%%g1
522 -1: ! End of inline udiv_qrnnd" \
523 + __asm__ ("! Inlined udiv_qrnnd\n" \
524 + "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
526 + "divscc %3,%4,%%g1\n" \
527 + "divscc %%g1,%4,%%g1\n" \
528 + "divscc %%g1,%4,%%g1\n" \
529 + "divscc %%g1,%4,%%g1\n" \
530 + "divscc %%g1,%4,%%g1\n" \
531 + "divscc %%g1,%4,%%g1\n" \
532 + "divscc %%g1,%4,%%g1\n" \
533 + "divscc %%g1,%4,%%g1\n" \
534 + "divscc %%g1,%4,%%g1\n" \
535 + "divscc %%g1,%4,%%g1\n" \
536 + "divscc %%g1,%4,%%g1\n" \
537 + "divscc %%g1,%4,%%g1\n" \
538 + "divscc %%g1,%4,%%g1\n" \
539 + "divscc %%g1,%4,%%g1\n" \
540 + "divscc %%g1,%4,%%g1\n" \
541 + "divscc %%g1,%4,%%g1\n" \
542 + "divscc %%g1,%4,%%g1\n" \
543 + "divscc %%g1,%4,%%g1\n" \
544 + "divscc %%g1,%4,%%g1\n" \
545 + "divscc %%g1,%4,%%g1\n" \
546 + "divscc %%g1,%4,%%g1\n" \
547 + "divscc %%g1,%4,%%g1\n" \
548 + "divscc %%g1,%4,%%g1\n" \
549 + "divscc %%g1,%4,%%g1\n" \
550 + "divscc %%g1,%4,%%g1\n" \
551 + "divscc %%g1,%4,%%g1\n" \
552 + "divscc %%g1,%4,%%g1\n" \
553 + "divscc %%g1,%4,%%g1\n" \
554 + "divscc %%g1,%4,%%g1\n" \
555 + "divscc %%g1,%4,%%g1\n" \
556 + "divscc %%g1,%4,%%g1\n" \
557 + "divscc %%g1,%4,%0\n" \
561 +"1: ! End of inline udiv_qrnnd" \
562 : "=r" ((USItype) (q)), \
563 "=r" ((USItype) (r)) \
564 : "r" ((USItype) (n1)), \
565 @@ -1099,46 +1099,46 @@
566 /* SPARC without integer multiplication and divide instructions.
567 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
568 #define umul_ppmm(w1, w0, u, v) \
569 - __asm__ ("! Inlined umul_ppmm
570 - wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
571 - sra %3,31,%%o5 ! Don't move this insn
572 - and %2,%%o5,%%o5 ! Don't move this insn
573 - andcc %%g0,0,%%g1 ! Don't move this insn
574 - mulscc %%g1,%3,%%g1
575 - mulscc %%g1,%3,%%g1
576 - mulscc %%g1,%3,%%g1
577 - mulscc %%g1,%3,%%g1
578 - mulscc %%g1,%3,%%g1
579 - mulscc %%g1,%3,%%g1
580 - mulscc %%g1,%3,%%g1
581 - mulscc %%g1,%3,%%g1
582 - mulscc %%g1,%3,%%g1
583 - mulscc %%g1,%3,%%g1
584 - mulscc %%g1,%3,%%g1
585 - mulscc %%g1,%3,%%g1
586 - mulscc %%g1,%3,%%g1
587 - mulscc %%g1,%3,%%g1
588 - mulscc %%g1,%3,%%g1
589 - mulscc %%g1,%3,%%g1
590 - mulscc %%g1,%3,%%g1
591 - mulscc %%g1,%3,%%g1
592 - mulscc %%g1,%3,%%g1
593 - mulscc %%g1,%3,%%g1
594 - mulscc %%g1,%3,%%g1
595 - mulscc %%g1,%3,%%g1
596 - mulscc %%g1,%3,%%g1
597 - mulscc %%g1,%3,%%g1
598 - mulscc %%g1,%3,%%g1
599 - mulscc %%g1,%3,%%g1
600 - mulscc %%g1,%3,%%g1
601 - mulscc %%g1,%3,%%g1
602 - mulscc %%g1,%3,%%g1
603 - mulscc %%g1,%3,%%g1
604 - mulscc %%g1,%3,%%g1
605 - mulscc %%g1,%3,%%g1
609 + __asm__ ("! Inlined umul_ppmm\n" \
610 + "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
611 + "sra %3,31,%%o5 ! Don't move this insn\n" \
612 + "and %2,%%o5,%%o5 ! Don't move this insn\n" \
613 + "andcc %%g0,0,%%g1 ! Don't move this insn\n" \
614 + "mulscc %%g1,%3,%%g1\n" \
615 + "mulscc %%g1,%3,%%g1\n" \
616 + "mulscc %%g1,%3,%%g1\n" \
617 + "mulscc %%g1,%3,%%g1\n" \
618 + "mulscc %%g1,%3,%%g1\n" \
619 + "mulscc %%g1,%3,%%g1\n" \
620 + "mulscc %%g1,%3,%%g1\n" \
621 + "mulscc %%g1,%3,%%g1\n" \
622 + "mulscc %%g1,%3,%%g1\n" \
623 + "mulscc %%g1,%3,%%g1\n" \
624 + "mulscc %%g1,%3,%%g1\n" \
625 + "mulscc %%g1,%3,%%g1\n" \
626 + "mulscc %%g1,%3,%%g1\n" \
627 + "mulscc %%g1,%3,%%g1\n" \
628 + "mulscc %%g1,%3,%%g1\n" \
629 + "mulscc %%g1,%3,%%g1\n" \
630 + "mulscc %%g1,%3,%%g1\n" \
631 + "mulscc %%g1,%3,%%g1\n" \
632 + "mulscc %%g1,%3,%%g1\n" \
633 + "mulscc %%g1,%3,%%g1\n" \
634 + "mulscc %%g1,%3,%%g1\n" \
635 + "mulscc %%g1,%3,%%g1\n" \
636 + "mulscc %%g1,%3,%%g1\n" \
637 + "mulscc %%g1,%3,%%g1\n" \
638 + "mulscc %%g1,%3,%%g1\n" \
639 + "mulscc %%g1,%3,%%g1\n" \
640 + "mulscc %%g1,%3,%%g1\n" \
641 + "mulscc %%g1,%3,%%g1\n" \
642 + "mulscc %%g1,%3,%%g1\n" \
643 + "mulscc %%g1,%3,%%g1\n" \
644 + "mulscc %%g1,%3,%%g1\n" \
645 + "mulscc %%g1,%3,%%g1\n" \
646 + "mulscc %%g1,0,%%g1\n" \
647 + "add %%g1,%%o5,%0\n" \
649 : "=r" ((USItype) (w1)), \
650 "=r" ((USItype) (w0)) \
651 : "%rI" ((USItype) (u)), \
652 @@ -1148,30 +1148,30 @@
653 /* It's quite necessary to add this much assembler for the sparc.
654 The default udiv_qrnnd (in C) is more than 10 times slower! */
655 #define udiv_qrnnd(q, r, n1, n0, d) \
656 - __asm__ ("! Inlined udiv_qrnnd
660 - addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
661 - sub %1,%2,%1 ! this kills msb of n
662 - addx %1,%1,%1 ! so this can't give carry
667 - addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
669 - sub %1,%2,%1 ! this kills msb of n
674 -! Got carry from n. Subtract next step to cancel this carry.
676 - addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
679 - ! End of inline udiv_qrnnd" \
680 + __asm__ ("! Inlined udiv_qrnnd\n" \
682 + "subcc %1,%2,%%g0\n" \
684 + "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
685 + "sub %1,%2,%1 ! this kills msb of n\n" \
686 + "addx %1,%1,%1 ! so this can't give carry\n" \
687 + "subcc %%g1,1,%%g1\n" \
689 + "subcc %1,%2,%%g0\n" \
691 + "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
693 + "sub %1,%2,%1 ! this kills msb of n\n" \
694 +"4: sub %1,%2,%1\n" \
695 +"5: addxcc %1,%1,%1\n" \
697 + "subcc %%g1,1,%%g1\n" \
698 +"! Got carry from n. Subtract next step to cancel this carry.\n" \
700 + "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n" \
702 +"3: xnor %0,0,%0\n" \
703 + "! End of inline udiv_qrnnd" \
704 : "=&r" ((USItype) (q)), \
705 "=&r" ((USItype) (r)) \
706 : "r" ((USItype) (d)), \
707 @@ -1185,11 +1185,11 @@
708 #if ((defined (__sparc__) && defined (__arch64__)) \
709 || defined (__sparcv9)) && W_TYPE_SIZE == 64
710 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
711 - __asm__ ("addcc %r4,%5,%1
716 + __asm__ ("addcc %r4,%5,%1\n" \
717 + "add %r2,%3,%0\n" \
718 + "bcs,a,pn %%xcc, 1f\n" \
719 + "add %0, 1, %0\n" \
721 : "=r" ((UDItype)(sh)), \
722 "=&r" ((UDItype)(sl)) \
723 : "%rJ" ((UDItype)(ah)), \
724 @@ -1199,11 +1199,11 @@
727 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
728 - __asm__ ("subcc %r4,%5,%1
733 + __asm__ ("subcc %r4,%5,%1\n" \
734 + "sub %r2,%3,%0\n" \
735 + "bcs,a,pn %%xcc, 1f\n" \
736 + "sub %0, 1, %0\n" \
738 : "=r" ((UDItype)(sh)), \
739 "=&r" ((UDItype)(sl)) \
740 : "rJ" ((UDItype)(ah)), \
741 @@ -1216,27 +1216,27 @@
743 UDItype tmp1, tmp2, tmp3, tmp4; \
744 __asm__ __volatile__ ( \
757 - sethi %%hi(0x80000000),%2
761 - movcc %%xcc,%%g0,%2
767 + "mulx %3,%6,%1\n" \
768 + "srlx %6,32,%2\n" \
769 + "mulx %2,%3,%4\n" \
770 + "sllx %4,32,%5\n" \
773 + "srlx %5,32,%5\n" \
774 + "addcc %4,%5,%4\n" \
775 + "srlx %7,32,%5\n" \
776 + "mulx %3,%5,%3\n" \
777 + "mulx %2,%5,%5\n" \
778 + "sethi %%hi(0x80000000),%2\n" \
779 + "addcc %4,%3,%4\n" \
780 + "srlx %4,32,%4\n" \
782 + "movcc %%xcc,%%g0,%2\n" \
783 + "addcc %5,%4,%5\n" \
784 + "sllx %3,32,%3\n" \
787 : "=r" ((UDItype)(wh)), \
788 "=&r" ((UDItype)(wl)), \
789 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
790 @@ -1250,8 +1250,8 @@
792 #if defined (__vax__) && W_TYPE_SIZE == 32
793 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
794 - __asm__ ("addl2 %5,%1
796 + __asm__ ("addl2 %5,%1\n" \
798 : "=g" ((USItype) (sh)), \
799 "=&g" ((USItype) (sl)) \
800 : "%0" ((USItype) (ah)), \
801 @@ -1259,8 +1259,8 @@
802 "%1" ((USItype) (al)), \
803 "g" ((USItype) (bl)))
804 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
805 - __asm__ ("subl2 %5,%1
807 + __asm__ ("subl2 %5,%1\n" \
809 : "=g" ((USItype) (sh)), \
810 "=&g" ((USItype) (sl)) \
811 : "0" ((USItype) (ah)), \