PAPI 7.1.0.0
Loading...
Searching...
No Matches
arm_v6.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2007 by NEC LE-IT: All rights reserved.
3 * A transcription of ARMv6 atomic operations for the ARM Realview Toolchain.
4 * This code works with armcc from RVDS 3.1
5 * This is based on work in gcc/arm.h by
6 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
7 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
8 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
9 *
10 *
11 *
12 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
13 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
14 *
15 * Permission is hereby granted to use or copy this program
16 * for any purpose, provided the above notices are retained on all copies.
17 * Permission to modify the code and to distribute modified code is granted,
18 * provided the above notices are retained, and a notice that the code was
19 * modified is included with the above copyright notice.
20 *
21 */
22
23#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
24
25#if __TARGET_ARCH_ARM < 6
26# if !defined(CPPCHECK)
27# error Do not use with ARM instruction sets lower than v6
28# endif
29#else
30
31#define AO_ACCESS_CHECK_ALIGNED
32#define AO_ACCESS_short_CHECK_ALIGNED
33#define AO_ACCESS_int_CHECK_ALIGNED
34#include "../all_atomic_only_load.h"
35
36#include "../standard_ao_double_t.h"
37
38/* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC
39 * A data memory barrier must be raised via CP15 command (see documentation).
40 *
41 * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a
42 * memory barrier (DMB). Raising it via CP15 should still work as told me by the
 43 * support engineers. If it turns out to be much quicker, then we should implement
44 * custom code for ARMv7 using the asm { dmb } command.
45 *
46 * If only a single processor is used, we can define AO_UNIPROCESSOR
47 * and do not need to access CP15 for ensuring a DMB at all.
48*/
49
50AO_INLINE void
51AO_nop_full(void)
52{
53# ifndef AO_UNIPROCESSOR
54 unsigned int dest=0;
55 /* Issue a data memory barrier (keeps ordering of memory transactions */
56 /* before and after this operation). */
57 __asm {
58 mcr p15,0,dest,c7,c10,5
59 };
60# else
62# endif
63}
64#define AO_HAVE_nop_full
65
66/* NEC LE-IT: atomic "store" - according to ARM documentation this is
67 * the only safe way to set variables also used in LL/SC environment.
68 * A direct write won't be recognized by the LL/SC construct in other CPUs.
69 *
70 * HB: Based on subsequent discussion, I think it would be OK to use an
71 * ordinary store here if we knew that interrupt handlers always cleared
72 * the reservation. They should, but there is some doubt that this is
73 * currently always the case for e.g. Linux.
74*/
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
{
  unsigned long tmp;

  /* LDREX/STREX loop: take the exclusive reservation, attempt the store, */
  /* and retry until STREX reports success (tmp == 0), so that the write  */
  /* is seen by LL/SC sequences running on other CPUs (see comment above).*/
retry:
__asm {
  ldrex tmp, [addr]
  strex tmp, value, [addr]
  teq tmp, #0
  bne retry
  };
}
87#define AO_HAVE_store
88
89/* NEC LE-IT: replace the SWAP as recommended by ARM:
90
91 "Applies to: ARM11 Cores
92 Though the SWP instruction will still work with ARM V6 cores, it is recommended
93 to use the new V6 synchronization instructions. The SWP instruction produces
94 locked read and write accesses which are atomic, i.e. another operation cannot
95 be done between these locked accesses which ties up external bus (AHB,AXI)
96 bandwidth and can increase worst case interrupt latencies. LDREX,STREX are
97 more flexible, other instructions can be done between the LDREX and STREX accesses.
98 "
99*/
100#ifndef AO_PREFER_GENERALIZED
102AO_test_and_set(volatile AO_TS_t *addr) {
103 AO_TS_VAL_t oldval;
104 unsigned long tmp;
105 unsigned long one = 1;
106retry:
107__asm {
108 ldrex oldval, [addr]
109 strex tmp, one, [addr]
110 teq tmp, #0
111 bne retry
112 }
113
114 return oldval;
115}
116#define AO_HAVE_test_and_set
117
119AO_fetch_and_add(volatile AO_t *p, AO_t incr)
120{
121 unsigned long tmp,tmp2;
122 AO_t result;
123
124retry:
125__asm {
126 ldrex result, [p]
127 add tmp, incr, result
128 strex tmp2, tmp, [p]
129 teq tmp2, #0
130 bne retry
131 }
132
133 return result;
134}
135#define AO_HAVE_fetch_and_add
136
138AO_fetch_and_add1(volatile AO_t *p)
139{
140 unsigned long tmp,tmp2;
141 AO_t result;
142
143retry:
144__asm {
145 ldrex result, [p]
146 add tmp, result, #1
147 strex tmp2, tmp, [p]
148 teq tmp2, #0
149 bne retry
150 }
151
152 return result;
153}
154#define AO_HAVE_fetch_and_add1
155
157AO_fetch_and_sub1(volatile AO_t *p)
158{
159 unsigned long tmp,tmp2;
160 AO_t result;
161
162retry:
163__asm {
164 ldrex result, [p]
165 sub tmp, result, #1
166 strex tmp2, tmp, [p]
167 teq tmp2, #0
168 bne retry
169 }
170
171 return result;
172}
173#define AO_HAVE_fetch_and_sub1
174#endif /* !AO_PREFER_GENERALIZED */
175
176#ifndef AO_GENERALIZE_ASM_BOOL_CAS
  /* Returns nonzero if the comparison succeeded. */
  /* LDREX/STREX loop.  result is preset to 2 so that the three outcomes */
  /* are distinguishable: 2 = comparison failed (strexeq skipped),       */
  /* 1 = reservation lost (retry), 0 = store succeeded.  The final test  */
  /* !(result&2) therefore reports success for outcome 0 only.           */
  AO_INLINE int
  AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
  {
    AO_t result, tmp;

  retry:
  __asm__ {
    mov result, #2
    ldrex tmp, [addr]
    teq tmp, old_val
#   ifdef __thumb__
      /* In Thumb-2, the conditional strexeq must sit in an IT block. */
      it eq
#   endif
    strexeq result, new_val, [addr]
    teq result, #1
    beq retry
    }
    return !(result&2);
  }
197# define AO_HAVE_compare_and_swap
198#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
199
201AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
202{
203 AO_t fetched_val, tmp;
204
205retry:
206__asm__ {
207 mov tmp, #2
208 ldrex fetched_val, [addr]
209 teq fetched_val, old_val
210# ifdef __thumb__
211 it eq
212# endif
213 strexeq tmp, new_val, [addr]
214 teq tmp, #1
215 beq retry
216 }
217 return fetched_val;
218}
219#define AO_HAVE_fetch_compare_and_swap
220
/* Helper functions for the RealView compiler: LDREXD is not usable     */
/* with the inline assembler, so use the "embedded" assembler as        */
/* suggested by ARM Dev. support (June 2008).                           */
/* AO_load_ex: exclusive 64-bit load of *addr.  addr arrives in r0 and  */
/* LDREXD leaves the 64-bit result in the r0/r1 pair, which is how a    */
/* 64-bit value is returned to the C caller.                            */
__asm inline double_ptr_storage AO_load_ex(const volatile AO_double_t *addr) {
  LDREXD r0,r1,[r0]
}
227
/* AO_store_ex: exclusive 64-bit store of the val1/val2 pair (r0/r1)    */
/* to *addr (r2).  STREXD places its status in r3, which is moved to    */
/* r0 so the C caller receives it as the return value: 0 = store        */
/* succeeded, nonzero = the exclusive reservation was lost.             */
__asm inline int AO_store_ex(AO_t val1, AO_t val2, volatile AO_double_t *addr) {
  STREXD r3,r0,r1,[r2]
  MOV r0,r3
}
232
234AO_double_load(const volatile AO_double_t *addr)
235{
237
238 result.AO_whole = AO_load_ex(addr);
239 return result;
240}
241#define AO_HAVE_double_load
242
243AO_INLINE int
245 AO_t old_val1, AO_t old_val2,
246 AO_t new_val1, AO_t new_val2)
247{
248 double_ptr_storage old_val =
249 ((double_ptr_storage)old_val2 << 32) | old_val1;
251 int result;
252
253 while(1) {
254 tmp = AO_load_ex(addr);
255 if(tmp != old_val) return 0;
256 result = AO_store_ex(new_val1, new_val2, addr);
257 if(!result) return 1;
258 }
259}
260#define AO_HAVE_compare_double_and_swap_double
261
262#endif /* __TARGET_ARCH_ARM >= 6 */
263
264#define AO_T_IS_INT
volatile int result
double tmp
AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr)
Definition: aarch64.h:47
AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
Definition: alpha.h:46
AO_INLINE void AO_nop_full(void)
Definition: alpha.h:27
AO_INLINE int AO_compare_double_and_swap_double(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2)
Definition: arm64.h:49
#define AO_t
Definition: atomic_ops.h:156
#define AO_compiler_barrier()
Definition: atomic_ops.h:305
#define AO_INLINE
Definition: atomic_ops.h:186
AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *p, AO_t incr)
Definition: gcc/arm.h:338
AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
Definition: gcc/arm.h:559
AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr)
Definition: gcc/arm.h:314
AO_INLINE AO_t AO_fetch_and_sub1(volatile AO_t *p)
Definition: gcc/arm.h:386
AO_INLINE AO_t AO_fetch_and_add1(volatile AO_t *p)
Definition: gcc/arm.h:362
#define AO_TS_t
Definition: gcc/hppa.h:39
#define AO_TS_VAL_t
Definition: gcc/hppa.h:44
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
unsigned long long double_ptr_storage