cairo-wideint.c

/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/* cairo - a vector graphics library with display and print output
 *
 * Copyright © 2004 Keith Packard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * The original code was contributed to the cairo library under
 * the dual license MPL+LGPL. We used the LGPL relicensing clause to
 * get a GPL version of this code, which now lives here. This header is
 * unmodified other than the licensing clause.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Keith Packard
 *
 * Contributor(s):
 *      Keith R. Packard <keithp@keithp.com>
 *
 * Code changes for ns-3 from upstream are marked with `//PDB'
 */

#include "cairo-wideint-private.h"

#if HAVE_UINT64_T

const char * cairo_impl64 = "uint64_t";

#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))

cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl64 = "uint32_t";

cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t q;

    q.lo = i;
    q.hi = 0;
    return q;
}

cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_int64_t q;

    q.lo = i;
    q.hi = i < 0 ? -1 : 0;
    return q;
}

static cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t q;

    q.lo = l;
    q.hi = h;
    return q;
}

cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi + b.hi;
    s.lo = a.lo + b.lo;
    if (s.lo < a.lo)
        s.hi++;
    return s;
}

cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi - b.hi;
    s.lo = a.lo - b.lo;
    if (s.lo > a.lo)
        s.hi--;
    return s;
}

#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
#define uint32_carry16 ((1) << 16)

cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;

    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    r1 += uint32_hi (r0);    /* no carry possible */
    r1 += r2;                /* but this can carry */
    if (r1 < r2)             /* check */
        r3 += uint32_carry16;

    s.hi = r3 + uint32_hi (r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}
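
/*
 * The multiply above is the usual schoolbook decomposition into 16-bit
 * halves:
 *
 *   a * b = (ah*2^16 + al) * (bh*2^16 + bl)
 *         = ah*bh*2^32 + (ah*bl + al*bh)*2^16 + al*bl
 *
 * r3, r1+r2 and r0 hold those three partial products; the only subtlety is
 * propagating the carry out of the 32-bit sum r1 + r2 into r3.
 */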

cairo_int64_t
_cairo_int32x32_64_mul (int32_t a, int32_t b)
{
    cairo_int64_t s;
    s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
    if (a < 0)
        s.hi -= b;
    if (b < 0)
        s.hi -= a;
    return s;
}
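
/*
 * Sign fixup: interpreting a negative a as unsigned adds 2^32 to it, which
 * inflates the unsigned product by b*2^32; subtracting b from the high word
 * (modulo 2^64) removes exactly that excess, and likewise for b.
 */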

cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}

cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.hi = a.lo;
        a.lo = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.hi = a.hi << shift | a.lo >> (32 - shift);
        a.lo = a.lo << shift;
    }
    return a;
}

cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = a.hi >> shift;
    }
    return a;
}

#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))

cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_rsa (a.hi, 31);
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    return (a.hi < b.hi ||
            (a.hi == b.hi && a.lo < b.lo));
}

int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return a.hi == b.hi && a.lo == b.lo;
}

int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return 1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 0;
    return _cairo_uint64_lt (a, b);
}

cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    return a;
}

cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)
        ++a.hi;
    return a;
}

/*
 * Simple bit-at-a-time divide.
 */
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;
    cairo_uint64_t bit;
    cairo_uint64_t quo;

    bit = _cairo_uint32_to_uint64 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
    {
        bit = _cairo_uint64_lsl (bit, 1);
        den = _cairo_uint64_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint64 (0);

    /* generate quotient, one bit at a time */
    while (bit.hi | bit.lo)
    {
        if (_cairo_uint64_le (den, num))
        {
            num = _cairo_uint64_sub (num, den);
            quo = _cairo_uint64_add (quo, bit);
        }
        bit = _cairo_uint64_rsl (bit, 1);
        den = _cairo_uint64_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}
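
/*
 * Example of the bit-at-a-time loop, dividing 100 by 7: den is first shifted
 * left to 112 with bit = 16, then each iteration halves den and bit and
 * subtracts den whenever it still fits, accepting quotient bits 8, 4 and 2
 * (100 - 56 - 28 - 14 = 2), giving quo = 14 and rem = 2.
 */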

#endif /* !HAVE_UINT64_T */

cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int64_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int64_negate (num);
    if (den_neg)
        den = _cairo_int64_negate (den);
    uqr = _cairo_uint64_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t) uqr.rem); //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t) uqr.quo); //PDB cast
    else
        qr.quo = (cairo_int64_t) uqr.quo;
    return qr;
}
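
/*
 * This matches C's truncating division: the quotient is negative exactly when
 * the operand signs differ, and the remainder carries the sign of the
 * numerator, so e.g. -7 / 2 yields quo = -3, rem = -1.
 */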

#if HAVE_UINT128_T

const char * cairo_impl128 = "uint128_t";

cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl128 = "cairo_uint64_t";

cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t q;

    q.lo = _cairo_uint32_to_uint64 (i);
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t q;

    q.lo = _cairo_int32_to_int64 (i);
    q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t q;

    q.lo = i;
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t q;

    q.lo = i;
    q.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_add (a.hi, b.hi);
    s.lo = _cairo_uint64_add (a.lo, b.lo);
    if (_cairo_uint64_lt (s.lo, a.lo))
        s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_sub (a.hi, b.hi);
    s.lo = _cairo_uint64_sub (a.lo, b.lo);
    if (_cairo_uint64_gt (s.lo, a.lo))
        s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

#if HAVE_UINT64_T

#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_shift32(i) ((i) << 32)
#define uint64_carry32 (((uint64_t) 1) << 32)

#else

#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)

static cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.lo;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.hi;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = 0;
    s.hi = i.lo;
    return s;
}

static const cairo_uint64_t uint64_carry32 = { 0, 1 };

#endif

cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t s;
    uint32_t ah, al, bh, bl;
    cairo_uint64_t r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);                /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))                  /* check */
        r3 = _cairo_uint64_add (r3, uint64_carry32);

    s.hi = _cairo_uint64_add (r3, uint64_hi (r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
                              uint64_lo (r0));
    return s;
}

cairo_int128_t
_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
{
    cairo_int128_t s;
    s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64 (a),
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (a))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (b))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (a));
    return s;
}

cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.hi, b.lo));
    return s;
}
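
/*
 * As with a native multiply, the cross terms a.lo*b.hi and a.hi*b.lo are only
 * added into the high half and the a.hi*b.hi term is dropped entirely, so the
 * product simply wraps modulo 2^128.
 */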

cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.hi = a.lo;
        a.lo = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
                                  _cairo_uint64_rsl (a.lo, (64 - shift)));
        a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint64_rsa (a.hi, 64 - 1);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_lt (a.hi, b.hi) ||
            (_cairo_uint64_eq (a.hi, b.hi) &&
             _cairo_uint64_lt (a.lo, b.lo)));
}

int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return 1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 0;
    return _cairo_uint128_lt (a, b);
}

int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_eq (a.hi, b.hi) &&
            _cairo_uint64_eq (a.lo, b.lo));
}

#if HAVE_UINT64_T
#define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
#else
#define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
#endif

cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;
    cairo_uint128_t bit;
    cairo_uint128_t quo;

    bit = _cairo_uint32_to_uint128 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint128_lt (den, num) && !_cairo_msbset64 (den.hi))
    {
        bit = _cairo_uint128_lsl (bit, 1);
        den = _cairo_uint128_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint128 (0);

    /* generate quotient, one bit at a time */
    while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128 (0)))
    {
        if (_cairo_uint128_le (den, num))
        {
            num = _cairo_uint128_sub (num, den);
            quo = _cairo_uint128_add (quo, bit);
        }
        bit = _cairo_uint128_rsl (bit, 1);
        den = _cairo_uint128_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

cairo_uint128_t
_cairo_uint128_negate (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}

cairo_uint128_t
_cairo_uint128_not (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return a;
}

#endif /* !HAVE_UINT128_T */

cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int128_negative (den);
    cairo_uquorem128_t uqr;
    cairo_quorem128_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        den = _cairo_int128_negate (den);
    uqr = _cairo_uint128_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int128_negate (uqr.rem);
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int128_negate (uqr.quo);
    else
        qr.quo = uqr.quo;
    return qr;
}

cairo_uquorem64_t
_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
                                 cairo_uint64_t den)
{
    cairo_uquorem64_t result;
    cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);

    /* These are the high 64 bits of the *96* bit numerator. We're
     * going to represent the numerator as xB + y, where x is a 64,
     * and y is a 32 bit number. */
    cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl (num, 32));

    /* Initialise the result to indicate overflow. */
    result.quo = _cairo_uint32s_to_uint64 (UINT_MAX, UINT_MAX); //PDB cast
    result.rem = den;

    /* Don't bother if the quotient is going to overflow. */
    if (_cairo_uint64_ge (x, den)) {
        return /* overflow */ result;
    }

    if (_cairo_uint64_lt (x, B)) {
        /* When the final quotient is known to fit in 32 bits, then
         * num < 2^64 if and only if den < 2^32. */
        return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
    }
    else {
        /* Denominator is >= 2^32, the numerator is >= 2^64, and the
         * division won't overflow: need two divrems. Write the
         * numerator and denominator as
         *
         *   num = xB + y           x : 64 bits, y : 32 bits
         *   den = uB + v           u, v : 32 bits
         */
        uint32_t y = _cairo_uint128_to_uint32 (num);
        uint32_t u = uint64_hi32 (den);
        uint32_t v = _cairo_uint64_to_uint32 (den);

        /* Compute a lower bound approximate quotient of num/den
         * from x/(u+1). Then we have
         *
         *   x = q(u+1) + r  ; q : 32 bits, r <= u : 32 bits.
         *
         *   xB + y = q(u+1)B          + (rB+y)
         *          = q(uB + B + v - v) + (rB+y)
         *          = q(uB + v) + qB - qv + (rB+y)
         *          = q(uB + v) + q(B-v)  + (rB+y)
         *
         * The true quotient of num/den then is q plus the
         * contribution of q(B-v) + (rB+y). The main contribution
         * comes from the term q(B-v), with the term (rB+y) only
         * contributing at most one part.
         *
         * The term q(B-v) must fit into 64 bits, since q fits into 32
         * bits on account of being a lower bound to the true
         * quotient, and as B-v <= 2^32, we may safely use a single
         * 64/64 bit division to find its contribution. */

        cairo_uquorem64_t quorem;
        cairo_uint64_t remainder; /* will contain final remainder */
        uint32_t quotient;        /* will contain final quotient. */
        uint32_t q;
        uint32_t r;

        /* Approximate quotient by dividing the high 64 bits of num by
         * u+1. Watch out for overflow of u+1. */
        if (u+1) {
            quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
            q = _cairo_uint64_to_uint32 (quorem.quo);
            r = _cairo_uint64_to_uint32 (quorem.rem);
        }
        else {
            q = uint64_hi32 (x);
            r = _cairo_uint64_to_uint32 (x);
        }
        quotient = q;

        /* Add the main term's contribution to quotient. Note B-v =
         * -v as an uint32 (unless v = 0) */
        if (v)
            quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t) v), den); //PDB cast
        else
            quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
        quotient += _cairo_uint64_to_uint32 (quorem.quo);

        /* Add the contribution of the subterm and start computing the
         * true remainder. */
        remainder = _cairo_uint32s_to_uint64 (r, y);
        if (_cairo_uint64_ge (remainder, den)) {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        /* Add the contribution of the main term's remainder. The
         * funky test here checks that remainder + main_rem >= den,
         * taking into account overflow of the addition. */
        remainder = _cairo_uint64_add (remainder, quorem.rem);
        if (_cairo_uint64_ge (remainder, den) ||
            _cairo_uint64_lt (remainder, quorem.rem))
        {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        result.quo = _cairo_uint32_to_uint64 (quotient);
        result.rem = remainder;
    }
    return result;
}
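
/*
 * Outside of the overflow case this routine requires num < den * 2^32, i.e.
 * the true quotient must fit in 32 bits. For example, with num = 2^64 + 5
 * and den = 2^33 (so x = 2^32, u = 2, v = 0) it returns quo = 2^31 and
 * rem = 5.
 */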

cairo_quorem64_t
_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uint64_t nonneg_den;
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        nonneg_den = _cairo_int64_negate (den);
    else
        nonneg_den = den;

    uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
    if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
        /* bail on overflow. */
        qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
        qr.rem = den;
        return qr;
    }

    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t) uqr.rem); //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int64_negate ((cairo_int64_t) uqr.quo); //PDB cast
    else
        qr.quo = uqr.quo;
    return qr;
}