/* Xenomai 3.0.8 — arch/arm Cobalt uapi arith.h */
/*
 * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
18 #ifndef _COBALT_ARM_ASM_UAPI_ARITH_H
19 #define _COBALT_ARM_ASM_UAPI_ARITH_H
20 
21 #include <asm/xenomai/uapi/features.h>
22 
#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
/*
 * ARMv4+ provides the long multiply instructions (umull/umlal), so the
 * scaled 64-bit multiplications can be done without any division. The
 * forward declarations and macros below override the generic helpers
 * pulled in by <cobalt/uapi/asm-generic/arith.h>; the actual bodies
 * follow that include, since they use its xnarch_u64tou32()/
 * xnarch_u64fromu32() helpers.
 *
 * NOTE(review): the CONFIG_THUMB2_KERNEL/CONFIG_FTRACE exclusion is
 * presumably because the implementations pin operands to fixed
 * registers (r7 among them) that a Thumb-2 ftrace kernel reserves as
 * frame pointer — TODO confirm.
 */
static inline __attribute__((__const__)) unsigned long long
mach_arm_nodiv_ullimd(const unsigned long long op,
		      const unsigned long long frac,
		      const unsigned rhs_integ);

/* Route the generic entry points to the ARM implementations. */
#define xnarch_nodiv_ullimd(op, frac, integ) \
	mach_arm_nodiv_ullimd((op), (frac), (integ))

static inline __attribute__((__const__)) long long
mach_arm_nodiv_llimd(const long long op,
		     const unsigned long long frac,
		     const unsigned rhs_integ);

#define xnarch_nodiv_llimd(op, frac, integ) \
	mach_arm_nodiv_llimd((op), (frac), (integ))
#else /* arm <= v3 */
/*
 * Pre-ARMv4 fallback used by the generic (division-based) helpers:
 * 96-bit += 64-bit addition with carry propagation.
 * l0:l1:l2 (most to least significant 32-bit words) += s0:s1.
 */
#define xnarch_add96and64(l0, l1, l2, s0, s1) \
	do { \
		__asm__ ("adds %2, %2, %4\n\t" \
			 "adcs %1, %1, %3\n\t" \
			 "adc %0, %0, #0\n\t" \
			 : "+r"(l0), "+r"(l1), "+r"(l2) \
			 : "r"(s0), "r"(s1): "cc"); \
	} while (0)
#endif /* arm <= v3 */
49 
50 #include <cobalt/uapi/asm-generic/arith.h>
51 
#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
/*
 * Shared asm template: accumulates the significant bits of
 * op * (integ + frac/2^64) into the rh:rm:rl register triplet (most to
 * least significant word). The 128-bit product op*frac is built from
 * the four 32x32->64 partial products; the lowest 32 bits (left in tl
 * by the first umull) are discarded except for their top bit, folded
 * back in as a rounding term ("%[tl], lsr #31"). op*integ is then
 * accumulated directly into the rh:rm result pair; callers read the
 * final value from rh:rm, i.e. op*integ + ((op*frac) >> 64).
 */
#define mach_arm_nodiv_ullimd_str \
	"umull %[tl], %[rl], %[opl], %[fracl]\n\t"	/* opl*fracl -> rl:tl */ \
	"umull %[rm], %[rh], %[oph], %[frach]\n\t"	/* oph*frach -> rh:rm */ \
	"adds %[rl], %[rl], %[tl], lsr #31\n\t"		/* round on tl bit 31 */ \
	"adcs %[rm], %[rm], #0\n\t" \
	"adc %[rh], %[rh], #0\n\t" \
	"umull %[tl], %[th], %[oph], %[fracl]\n\t"	/* cross product #1 */ \
	"adds %[rl], %[rl], %[tl]\n\t" \
	"adcs %[rm], %[rm], %[th]\n\t" \
	"adc %[rh], %[rh], #0\n\t" \
	"umull %[tl], %[th], %[opl], %[frach]\n\t"	/* cross product #2 */ \
	"adds %[rl], %[rl], %[tl]\n\t" \
	"adcs %[rm], %[rm], %[th]\n\t" \
	"adc %[rh], %[rh], #0\n\t" \
	"umlal %[rm], %[rh], %[opl], %[integ]\n\t"	/* rh:rm += opl*integ */ \
	"mla %[rh], %[oph], %[integ], %[rh]\n\t"	/* rh += oph*integ */
69 
/*
 * mach_arm_nodiv_ullimd - scale the unsigned 64-bit @op by the
 * fixed-point factor (@rhs_integ + @frac / 2^64) using long multiplies
 * only; returns op * rhs_integ + ((op * frac) >> 64) as assembled in
 * the rh:rm register pair by mach_arm_nodiv_ullimd_str.
 * NOTE(review): contract inferred from the asm template — confirm
 * against the generic xnarch_nodiv_ullimd implementation.
 */
static inline __attribute__((__const__)) unsigned long long
mach_arm_nodiv_ullimd(const unsigned long long op,
		      const unsigned long long frac,
		      const unsigned rhs_integ)
{
	/*
	 * Every operand is pinned to an explicit register so the names
	 * used by the template always land in distinct registers.
	 * NOTE(review): the outputs lack early-clobber ("&") markers;
	 * that is only safe because of this explicit pinning — confirm
	 * if the bindings are ever relaxed.
	 */
	register unsigned rl __asm__("r5");	/* fractional residue (dropped) */
	register unsigned rm __asm__("r0");	/* result low word */
	register unsigned rh __asm__("r1");	/* result high word */
	register unsigned fracl __asm__ ("r2");
	register unsigned frach __asm__ ("r3");
	register unsigned integ __asm__("r4") = rhs_integ;
	register unsigned opl __asm__ ("r6");
	register unsigned oph __asm__ ("r7");
	register unsigned tl __asm__("r8");	/* scratch */
	register unsigned th __asm__("r9");	/* scratch */

	/* Split the 64-bit inputs into high:low 32-bit halves. */
	xnarch_u64tou32(op, oph, opl);
	xnarch_u64tou32(frac, frach, fracl);

	__asm__ (mach_arm_nodiv_ullimd_str
		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
		   [tl]"=r"(tl), [th]"=r"(th)
		 : [opl]"r"(opl), [oph]"r"(oph),
		   [fracl]"r"(fracl), [frach]"r"(frach),
		   [integ]"r"(integ)
		 : "cc");

	/* rl only carries the rounded-off fraction; result is rh:rm. */
	return xnarch_u64fromu32(rh, rm);
}
99 
/*
 * mach_arm_nodiv_llimd - signed variant of mach_arm_nodiv_ullimd:
 * computes op * rhs_integ + ((op * frac) >> 64) for a signed @op by
 * negating a negative operand, performing the unsigned multiply, then
 * negating the result back.
 */
static inline __attribute__((__const__)) long long
mach_arm_nodiv_llimd(const long long op,
		     const unsigned long long frac,
		     const unsigned rhs_integ)
{
	register unsigned rl __asm__("r5");	/* fractional residue (dropped) */
	register unsigned rm __asm__("r0");	/* result low word */
	register unsigned rh __asm__("r1");	/* result high word */
	register unsigned fracl __asm__ ("r2");
	register unsigned frach __asm__ ("r3");
	register unsigned integ __asm__("r4") = rhs_integ;
	register unsigned opl __asm__ ("r6");
	register unsigned oph __asm__ ("r7");
	register unsigned tl __asm__("r8");	/* scratch */
	register unsigned th __asm__("r9");	/* scratch */
	register unsigned s __asm__("r10");	/* nonzero => negate result back */

	/* Split the 64-bit inputs into high:low 32-bit halves. */
	xnarch_u64tou32(op, oph, opl);
	xnarch_u64tou32(frac, frach, fracl);

	/*
	 * s = oph >> 30 decides whether the operand is treated as
	 * negative. NOTE(review): this tests bit 30 as well as the
	 * sign bit, so positive values >= 2^62 would also be negated —
	 * presumably callers never pass such magnitudes; confirm.
	 *
	 * The rsbs/sbc pair is a 64-bit negate: opl = 0 - opl (setting
	 * borrow), then oph = oph - (oph << 1) - borrow = -oph - borrow.
	 * The same sequence re-negates rh:rm at label 2 when s != 0.
	 *
	 * NOTE(review): %[opl]/%[oph] are written by that negation even
	 * though they are declared as plain inputs, and %[s] is an
	 * output read after other outputs are written; this relies
	 * entirely on every operand being pinned to a distinct explicit
	 * register above — consider "+r" constraints.
	 */
	__asm__ ("movs %[s], %[oph], lsr #30\n\t"
		 "beq 1f\n\t"
		 "rsbs %[opl], %[opl], #0\n\t"
		 "sbc %[oph], %[oph], %[oph], lsl #1\n"
		 "1:\t"
		 mach_arm_nodiv_ullimd_str
		 "teq %[s], #0\n\t"
		 "beq 2f\n\t"
		 "rsbs %[rm], %[rm], #0\n\t"
		 "sbc %[rh], %[rh], %[rh], lsl #1\n"
		 "2:\t"
		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
		   [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s)
		 : [opl]"r"(opl), [oph]"r"(oph),
		   [fracl]"r"(fracl), [frach]"r"(frach),
		   [integ]"r"(integ)
		 : "cc");

	return xnarch_u64fromu32(rh, rm);
}
#endif /* arm >= v4 */
141 
142 #endif /* _COBALT_ARM_ASM_UAPI_ARITH_H */