/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2005-2018 Andes Technology Corporation */

#include <asm/bitfield.h>

#define _FP_W_TYPE_SIZE		32
#define _FP_W_TYPE		unsigned long
#define _FP_WS_TYPE		signed long
#define _FP_I_TYPE		long

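/* Split a word into half-words; used by the long-arithmetic macros below. */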
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

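/*
 * Fraction multiplication and division primitives handed to the generic
 * soft-fp code, built on the umul_ppmm/udiv_qrnnd helpers below.
 */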
#define _FP_MUL_MEAT_S(R, X, Y)				\
	_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S, R, X, Y, umul_ppmm)
#define _FP_MUL_MEAT_D(R, X, Y)				\
	_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm)
#define _FP_MUL_MEAT_Q(R, X, Y)				\
	_FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm)

#define _FP_MUL_MEAT_DW_S(R, X, Y)			\
	_FP_MUL_MEAT_DW_1_wide(_FP_WFRACBITS_S, R, X, Y, umul_ppmm)
#define _FP_MUL_MEAT_DW_D(R, X, Y)			\
	_FP_MUL_MEAT_DW_2_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm)

#define _FP_DIV_MEAT_S(R, X, Y)	_FP_DIV_MEAT_1_udiv_norm(S, R, X, Y)
#define _FP_DIV_MEAT_D(R, X, Y)	_FP_DIV_MEAT_2_udiv(D, R, X, Y)

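/* Default NaN: zero sign bit, all fraction bits set. */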
#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1), -1
#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
#define _FP_NANSIGN_S		0
#define _FP_NANSIGN_D		0
#define _FP_NANSIGN_Q		0

#define _FP_KEEPNANFRACP 1
#define _FP_QNANNEGATEDP 0

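/*
 * Pick which NaN to propagate when both operands are NaNs: if Y is a
 * signalling NaN and X is not, take the result from Y, otherwise from X.
 */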
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
do {								\
	if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)	\
	    && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) { \
		R##_s = Y##_s;					\
		_FP_FRAC_COPY_##wc(R, Y);			\
	} else {						\
		R##_s = X##_s;					\
		_FP_FRAC_COPY_##wc(R, X);			\
	}							\
	R##_c = FP_CLS_NAN;					\
} while (0)

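/* Saved FPCSR of the current task; holds the rounding mode and exception flags. */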
#define __FPU_FPCSR	(current->thread.fpu.fpcsr)

/* Obtain the current rounding mode. */
#define FP_ROUNDMODE	(__FPU_FPCSR & FPCSR_mskRM)

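/* Rounding modes, as tested against FP_ROUNDMODE by the soft-fp code. */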
#define FP_RND_NEAREST		0
#define FP_RND_PINF		1
#define FP_RND_MINF		2
#define FP_RND_ZERO		3

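/* Exception flags, mapped directly onto the corresponding FPCSR bits. */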
#define FP_EX_INVALID		FPCSR_mskIVO
#define FP_EX_DIVZERO		FPCSR_mskDBZ
#define FP_EX_OVERFLOW		FPCSR_mskOVF
#define FP_EX_UNDERFLOW		FPCSR_mskUDF
#define FP_EX_INEXACT		FPCSR_mskIEX

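/* Floating-point compare results: equal, less than, greater than, unordered. */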
#define SF_CEQ	2
#define SF_CLT	1
#define SF_CGT	3
#define SF_CUN	4

#include <asm/byteorder.h>

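/*
 * The generic soft-fp headers test __BYTE_ORDER against __BIG_ENDIAN and
 * __LITTLE_ENDIAN; derive those from the kernel's byteorder definitions.
 */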
#ifdef __BIG_ENDIAN__
#define __BYTE_ORDER __BIG_ENDIAN
#define __LITTLE_ENDIAN 0
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#define __BIG_ENDIAN 0
#endif

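/* soft-fp calls abort() on paths it considers unreachable; make it a no-op here. */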
#define abort() do { } while (0)
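
/*
 * umul_ppmm: multiply the single words u and v and leave the double-word
 * product in (w1, w0).  Plain C version using half-word partial products,
 * in the style of GCC's longlong.h.
 */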
#define umul_ppmm(w1, w0, u, v)						\
do {									\
	UWtype __x0, __x1, __x2, __x3;					\
	UHWtype __ul, __vl, __uh, __vh;					\
									\
	__ul = __ll_lowpart(u);						\
	__uh = __ll_highpart(u);					\
	__vl = __ll_lowpart(v);						\
	__vh = __ll_highpart(v);					\
									\
	__x0 = (UWtype) __ul * __vl;					\
	__x1 = (UWtype) __ul * __vh;					\
	__x2 = (UWtype) __uh * __vl;					\
	__x3 = (UWtype) __uh * __vh;					\
									\
	__x1 += __ll_highpart(__x0);	/* this can't give carry */	\
	__x1 += __x2;			/* but this indeed can */	\
	if (__x1 < __x2)		/* did we get it? */		\
		__x3 += __ll_B;		/* yes, add it in proper pos. */\
									\
	(w1) = __x3 + __ll_highpart(__x1);				\
	(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);	\
} while (0)

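/*
 * add_ssaaaa: double-word addition, (sh, sl) = (ah, al) + (bh, bl),
 * propagating the carry out of the low word into the high word.
 */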
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
	UWtype __x; \
	__x = (al) + (bl); \
	(sh) = (ah) + (bh) + (__x < (al)); \
	(sl) = __x; \
} while (0)

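/*
 * sub_ddmmss: double-word subtraction, (sh, sl) = (ah, al) - (bh, bl),
 * propagating the borrow out of the low word into the high word.
 */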
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
	UWtype __x; \
	__x = (al) - (bl); \
	(sh) = (ah) - (bh) - (__x > (al)); \
	(sl) = __x; \
} while (0)

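/*
 * udiv_qrnnd: divide the double-word value (n1, n0) by the single word d,
 * leaving the quotient in q and the remainder in r.  Assumes n1 < d so
 * the quotient fits in one word; plain C version in the style of GCC's
 * longlong.h.
 */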
#define udiv_qrnnd(q, r, n1, n0, d)				\
do {								\
	UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;		\
	__d1 = __ll_highpart(d);				\
	__d0 = __ll_lowpart(d);					\
								\
	__r1 = (n1) % __d1;					\
	__q1 = (n1) / __d1;					\
	__m = (UWtype) __q1 * __d0;				\
	__r1 = __r1 * __ll_B | __ll_highpart(n0);		\
	if (__r1 < __m) {					\
		__q1--, __r1 += (d);				\
		if (__r1 >= (d))				\
			if (__r1 < __m)				\
				__q1--, __r1 += (d);		\
	}							\
	__r1 -= __m;						\
	__r0 = __r1 % __d1;					\
	__q0 = __r1 / __d1;					\
	__m = (UWtype) __q0 * __d0;				\
	__r0 = __r0 * __ll_B | __ll_lowpart(n0);		\
	if (__r0 < __m) {					\
		__q0--, __r0 += (d);				\
		if (__r0 >= (d))				\
			if (__r0 < __m)				\
				__q0--, __r0 += (d);		\
	}							\
	__r0 -= __m;						\
	(q) = (UWtype) __q1 * __ll_B | __q0;			\
	(r) = __r0;						\
} while (0)