/* Xenomai 3.0.8 — arch/x86 UAPI fptest.h */
1 /*
2  * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13 
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17  */
18 #ifndef _COBALT_X86_ASM_UAPI_FPTEST_H
19 #define _COBALT_X86_ASM_UAPI_FPTEST_H
20 
21 #define __COBALT_HAVE_SSE2 0x1
22 #define __COBALT_HAVE_AVX 0x2
23 
/*
 * fp_regs_set(): fill the FPU/SIMD registers with a known marker value.
 *
 * Seeds the x87 stack and - depending on @features - the SSE2 (xmm0-7)
 * or AVX (ymm0-7) registers with @val, so that fp_regs_check() can later
 * verify the state was preserved across a context switch.
 *
 * @features: bitmask of __COBALT_HAVE_SSE2 and/or __COBALT_HAVE_AVX;
 *            selects which SIMD register file is seeded (AVX takes
 *            precedence when both bits are set).
 * @val:      marker value written into every register.
 *
 * NOTE(review): deliberately clobbers st0-st7 and xmm/ymm0-7 without a
 * clobber list - perturbing the FPU state is the whole point of this
 * test helper, so the compiler must not be told to save/restore them.
 */
static inline void fp_regs_set(int features, unsigned int val)
{
	/* 256-bit pattern: @val in 64-bit lanes 0 and 2, zero elsewhere. */
	unsigned long long vec[4] = { val, 0, val, 0 };
	unsigned i;

	/* Push @val onto all 8 x87 stack slots (fildl loads an int). */
	for (i = 0; i < 8; i++)
		__asm__ __volatile__("fildl %0": /* no output */ :"m"(val));

	if (features & __COBALT_HAVE_AVX) {
		/*
		 * Copy the 256-bit pattern into ymm0-7. Only %0 appears in
		 * the template; the extra "m" operands exist to tell the
		 * compiler that all of vec[] is read by the asm, so the
		 * initializer above is not optimized away.
		 */
		__asm__ __volatile__(
			"vmovupd %0,%%ymm0;"
			"vmovupd %0,%%ymm1;"
			"vmovupd %0,%%ymm2;"
			"vmovupd %0,%%ymm3;"
			"vmovupd %0,%%ymm4;"
			"vmovupd %0,%%ymm5;"
			"vmovupd %0,%%ymm6;"
			"vmovupd %0,%%ymm7;"
			: : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3]));
	} else if (features & __COBALT_HAVE_SSE2) {
		/* Same idea for SSE2: 128-bit loads (lanes 0-1) into xmm0-7. */
		__asm__ __volatile__(
			"movupd %0,%%xmm0;"
			"movupd %0,%%xmm1;"
			"movupd %0,%%xmm2;"
			"movupd %0,%%xmm3;"
			"movupd %0,%%xmm4;"
			"movupd %0,%%xmm5;"
			"movupd %0,%%xmm6;"
			"movupd %0,%%xmm7;"
			: : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3]));
	}
}
56 
/*
 * fp_regs_check(): verify the FPU/SIMD registers still hold the marker.
 *
 * Reads back the x87 stack and - depending on @features - xmm0-7 or
 * ymm0-7, comparing each register against @val as previously installed
 * by fp_regs_set(). Each mismatch is reported through @report.
 *
 * @features: same bitmask as passed to fp_regs_set().
 * @val:      expected marker value.
 * @report:   printf-like callback invoked once per mismatching register.
 *
 * Returns @val when everything matches; otherwise the value found in
 * the last mismatching register (truncated to unsigned int for the
 * 64-bit SIMD lanes).
 *
 * Side effect: pops (empties) the x87 register stack.
 */
static inline unsigned int fp_regs_check(int features, unsigned int val,
 int (*report)(const char *fmt, ...))
{
	unsigned long long vec[8][4];
	unsigned int i, result = val;
	unsigned e[8];

	/*
	 * Pop the 8 x87 stack slots. The stack pops in reverse push
	 * order, so store into e[7 - i] to make e[] line up with
	 * fp_regs_set()'s fill order.
	 */
	for (i = 0; i < 8; i++)
		__asm__ __volatile__("fistpl %0":"=m"(e[7 - i]));

	if (features & __COBALT_HAVE_AVX) {
		/*
		 * Dump ymm0-7: each store writes 32 bytes starting at
		 * vec[i][0], i.e. one full row of vec[][].
		 */
		__asm__ __volatile__(
			"vmovupd %%ymm0,%0;"
			"vmovupd %%ymm1,%1;"
			"vmovupd %%ymm2,%2;"
			"vmovupd %%ymm3,%3;"
			"vmovupd %%ymm4,%4;"
			"vmovupd %%ymm5,%5;"
			"vmovupd %%ymm6,%6;"
			"vmovupd %%ymm7,%7;"
			: "=m" (vec[0][0]), "=m" (vec[1][0]),
			  "=m" (vec[2][0]), "=m" (vec[3][0]),
			  "=m" (vec[4][0]), "=m" (vec[5][0]),
			  "=m" (vec[6][0]), "=m" (vec[7][0]));
	} else if (features & __COBALT_HAVE_SSE2) {
		/* Dump xmm0-7: 16-byte stores, filling lanes 0-1 of each row. */
		__asm__ __volatile__(
			"movupd %%xmm0,%0;"
			"movupd %%xmm1,%1;"
			"movupd %%xmm2,%2;"
			"movupd %%xmm3,%3;"
			"movupd %%xmm4,%4;"
			"movupd %%xmm5,%5;"
			"movupd %%xmm6,%6;"
			"movupd %%xmm7,%7;"
			: "=m" (vec[0][0]), "=m" (vec[1][0]),
			  "=m" (vec[2][0]), "=m" (vec[3][0]),
			  "=m" (vec[4][0]), "=m" (vec[5][0]),
			  "=m" (vec[6][0]), "=m" (vec[7][0]));
	}

	/* Check the x87 values recovered above. */
	for (i = 0; i < 8; i++)
		if (e[i] != val) {
			report("r%d: %u != %u\n", i, e[i], val);
			result = e[i];
		}

	if (features & __COBALT_HAVE_AVX) {
		/*
		 * Only lanes 0 and 2 are checked: fp_regs_set() seeded
		 * lanes 1 and 3 with zero, not @val.
		 */
		for (i = 0; i < 8; i++) {
			int error = 0;
			if (vec[i][0] != val) {
				result = vec[i][0];
				error = 1;
			}
			if (vec[i][2] != val) {
				result = vec[i][2];
				error = 1;
			}
			if (error)
				report("ymm%d: %llu/%llu != %u/%u\n",
				       i, (unsigned long long)vec[i][0],
				       (unsigned long long)vec[i][2],
				       val, val);
		}
	} else if (features & __COBALT_HAVE_SSE2) {
		/* SSE2 path: only lane 0 of each xmm dump carries @val. */
		for (i = 0; i < 8; i++)
			if (vec[i][0] != val) {
				report("xmm%d: %llu != %u\n",
				       i, (unsigned long long)vec[i][0], val);
				result = vec[i][0];
			}
	}

	return result;
}
131 
132 #endif /* _COBALT_X86_ASM_UAPI_FPTEST_H */