Commit aea8a605 authored by Cyrill Gorcunov, committed by Pavel Emelyanov

atomic -- Switch to linux kernel templates

Use the same code as provided in the kernel. Initially we
used our own prototypes for the sake of simplicity (they
were all based on the "lock xadd" instruction). There is
no more need for that, so we can switch to the well-known
kernel API.

Because the kernel uses a plain int type to carry atomic
counters, I had to add explicit u32 casts for futexes, as
well as a couple of fixes for the new API usage.
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent 19391c85
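
For orientation, here is a minimal sketch (not part of the patch) of the fetch-and-add primitive everything below revolves around: "lock xadd" atomically adds a register to memory and leaves the old memory value in the register. The xadd() helper below is a stand-in for the one the new header pulls in from asm/cmpxchg.h; it assumes GCC or clang on x86.

/* Minimal sketch, not from the patch: "lock xadd" as a fetch-and-add
 * primitive, the building block of both the old macros and the new
 * kernel-style atomic_add_return(). */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Atomically add @i to *@p and return the *old* value of *@p. */
static inline int xadd(int *p, int i)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (i), "+m" (*p)
		     : : "memory", "cc");
	return i;	/* xadd leaves the previous memory value in the register */
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);	/* old value + i == new value */
}

int main(void)
{
	atomic_t a = { 40 };
	printf("%d\n", atomic_add_return(2, &a));	/* prints 42 */
	return 0;
}
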
@@ -2,7 +2,7 @@
 #define __CR_ATOMIC_H__
 typedef struct {
-	u32 counter;
+	int counter;
 } atomic_t;
@@ -20,7 +20,7 @@ typedef struct {
 #define atomic_set(mem,v) ((mem)->counter = (v))
-#define atomic_get(v) (*(volatile u32 *)&(v)->counter)
+#define atomic_get(v) (*(volatile int *)&(v)->counter)
 static inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -68,11 +68,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 static inline int atomic_inc(atomic_t *v) { return atomic_add_return(1, v) - 1; }
-static inline int atomic_add(atomic_t *v, int val) { return atomic_add_return(val, v) - val; }
+static inline int atomic_add(int val, atomic_t *v) { return atomic_add_return(val, v) - val; }
 static inline int atomic_dec(atomic_t *v) { return atomic_sub_return(1, v) + 1; }
 /* true if the result is 0, or false for all other cases. */
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) (atomic_add_return(1, v))
 #endif /* __CR_ATOMIC_H__ */
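
The hunk above also swaps atomic_add() to the kernel's argument order (operand first, atomic_t pointer last), so every caller has to flip its arguments, as the stats.c hunk at the end of this diff does. A compilable sketch of the new convention, with a GCC builtin standing in for the arch-specific implementation:

/* Sketch of the new calling convention only; __sync_add_and_fetch is a
 * portable stand-in for the per-arch atomic_add_return(). */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, i);
}

static inline int atomic_add(int val, atomic_t *v)	/* kernel order */
{
	return atomic_add_return(val, v) - val;		/* returns the old value */
}

#define atomic_inc_return(v) (atomic_add_return(1, v))

int main(void)
{
	atomic_t cnt = { 0 };
	atomic_add(3, &cnt);				/* was: atomic_add(&cnt, 3) */
	printf("%d\n", atomic_inc_return(&cnt));	/* prints 4 */
	return 0;
}
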
-#ifndef __CR_ATOMIC_H__
-#define __CR_ATOMIC_H__
-
-#include "asm/types.h"
-
-typedef struct {
-	u32 counter;
-} atomic_t;
-
-#define atomic_set(mem, v)					\
-({								\
-	u32 ret__ = v;						\
-	asm volatile ("lock xchg %0, %1\n"			\
-			: "+r" (ret__), "+m" ((mem)->counter)	\
-			:					\
-			: "cc", "memory");			\
-})
-
-#define atomic_get(mem)						\
-({								\
-	u32 ret__ = 0;						\
-	asm volatile ("lock xadd %0, %1\n"			\
-			: "+r" (ret__), "+m" ((mem)->counter)	\
-			:					\
-			: "cc", "memory");			\
-	ret__;							\
-})
-
-#define atomic_add(mem, val)					\
-({								\
-	u32 ret__ = (val);					\
-	asm volatile ("lock xadd %0, %1\n"			\
-			: "+r" (ret__), "+m" ((mem)->counter)	\
-			:					\
-			: "cc", "memory");			\
-	ret__;							\
-})
-
-#define atomic_inc(mem) atomic_add(mem, 1)
-
-#define atomic_dec(mem)						\
-({								\
-	u32 ret__ = -1;						\
-	asm volatile ("lock xadd %0, %1\n"			\
-			: "+r" (ret__), "+m" ((mem)->counter)	\
-			:					\
-			: "cc", "memory");			\
-	ret__;							\
-})
-
-/* true if the result is 0, or false for all other cases. */
-#define atomic_dec_and_test(mem)				\
-({								\
-	unsigned char ret__;					\
-	asm volatile ("lock decl %0; sete %1\n"			\
-			: "+m" ((mem)->counter), "=qm" (ret__)	\
-			:					\
-			: "cc", "memory");			\
-	ret__ != 0;						\
-})
-
-#endif /* __CR_ATOMIC_H__ */
+#ifndef __CR_ATOMIC_H__
+#define __CR_ATOMIC_H__
+
+#include "asm/cmpxchg.h"
+
+#define LOCK_PREFIX "\n\tlock; "
+
+typedef struct {
+	int counter;
+} atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+static inline int atomic_read(const atomic_t *v)
+{
+	return (*(volatile int *)&(v)->counter);
+}
+
+/*
+ * FIXME Use atomic_read instead of atomic_get all over the code
+ */
+#define atomic_get atomic_read
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "+m" (v->counter));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "+m" (v->counter));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	unsigned char c;
+
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
+	return c != 0;
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	return i + xadd(&v->counter, i);
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i, v);
+}
+
+#define atomic_inc_return(v)	(atomic_add_return(1, v))
+#define atomic_dec_return(v)	(atomic_sub_return(1, v))
+
+#endif /* __CR_ATOMIC_H__ */
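
The rewritten header follows the kernel's x86 template: void add/sub/inc/dec helpers plus *_return variants built on xadd(). As a semantics reference only (the real header is the inline asm above), the same API can be modeled in portable C11 atomics; note that atomic_add_return() reports the new value, while x86 xadd itself hands back the old one:

/* C11 model of the new API, for reference; names are mine, not CRIU's. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } catomic_t;

static inline int  catomic_read(catomic_t *v)       { return atomic_load(&v->counter); }
static inline void catomic_set(catomic_t *v, int i) { atomic_store(&v->counter, i); }

static inline int catomic_add_return(int i, catomic_t *v)
{
	/* fetch_add returns the old value, like x86 xadd; add i for the new one */
	return atomic_fetch_add(&v->counter, i) + i;
}

static inline int catomic_dec_and_test(catomic_t *v)
{
	return catomic_add_return(-1, v) == 0;	/* true when it hits zero */
}

int main(void)
{
	catomic_t a;
	catomic_set(&a, 1);
	printf("%d\n", catomic_dec_and_test(&a));	/* prints 1 */
	return 0;
}
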
@@ -27,7 +27,7 @@ static inline u32 futex_get(futex_t *f)
 /* Set futex @f value to @v */
 static inline void futex_set(futex_t *f, u32 v)
 {
-	atomic_set(&f->raw, v);
+	atomic_set(&f->raw, (int)v);
 }
 #define futex_init(f) futex_set(f, 0)
@@ -39,11 +39,11 @@ static inline void futex_set(futex_t *f, u32 v)
 	u32 tmp;						\
 								\
 	while (1) {						\
-		tmp = atomic_get(&(__f)->raw);			\
+		tmp = (u32)atomic_get(&(__f)->raw);		\
 		if ((tmp & FUTEX_ABORT_FLAG) ||			\
 		    (tmp __cond (__v)))				\
 			break;					\
-		ret = sys_futex(&(__f)->raw.counter, FUTEX_WAIT,\
+		ret = sys_futex((u32 *)&(__f)->raw.counter, FUTEX_WAIT,\
 				tmp, NULL, NULL, 0);		\
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);		\
 	}							\
@@ -52,8 +52,8 @@ static inline void futex_set(futex_t *f, u32 v)
 /* Set futex @f to @v and wake up all waiters */
 static inline void futex_set_and_wake(futex_t *f, u32 v)
 {
-	atomic_set(&f->raw, v);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	atomic_set(&f->raw, (int)v);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }

 /* Mark futex @f as wait abort needed and wake up all waiters */
@@ -67,14 +67,14 @@ static inline void futex_abort_and_wake(futex_t *f)
 static inline void futex_dec_and_wake(futex_t *f)
 {
 	atomic_dec(&f->raw);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }

 /* Increment futex @f value and wake up all waiters */
 static inline void futex_inc_and_wake(futex_t *f)
 {
 	atomic_inc(&f->raw);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }

 /* Plain increment futex @f value */
@@ -102,8 +102,8 @@ static inline void futex_wait_while_eq(futex_t *f, u32 v)
 /* Wait while futex @f value is @v */
 static inline void futex_wait_while(futex_t *f, u32 v)
 {
-	while (atomic_get(&f->raw) == v) {
-		int ret = sys_futex(&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
+	while ((u32)atomic_get(&f->raw) == v) {
+		int ret = sys_futex((u32 *)&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
 	}
 }
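
All the futex_wait_* helpers above rely on the same race-free pattern: read the counter, then FUTEX_WAIT on exactly the value just read, so a wake that slips in between the read and the syscall makes the kernel return immediately (-EWOULDBLOCK) instead of sleeping on a stale value. A standalone sketch of that loop using the raw syscall (CRIU's sys_futex is its own thin wrapper; the names here are illustrative):

/* Sketch of the futex wait loop; assumes Linux with glibc's syscall(). */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Block while *addr == v; the kernel rechecks the value atomically, so a
 * concurrent wake cannot be lost. With libc syscall(), EWOULDBLOCK shows
 * up in errno as EAGAIN rather than as a -EWOULDBLOCK return. */
static void wait_while(uint32_t *addr, uint32_t v)
{
	while (__atomic_load_n(addr, __ATOMIC_SEQ_CST) == v) {
		if (futex(addr, FUTEX_WAIT, v) < 0 &&
		    errno != EAGAIN && errno != EINTR)
			break;	/* real error */
	}
}

int main(void)
{
	uint32_t word = 1;
	wait_while(&word, 0);	/* returns at once: word != 0 */
	return 0;
}
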
@@ -115,7 +115,7 @@ typedef struct {
 static inline void mutex_init(mutex_t *m)
 {
 	u32 c = 0;
-	atomic_set(&m->raw, c);
+	atomic_set(&m->raw, (int)c);
 }

 static inline void mutex_lock(mutex_t *m)
@@ -123,8 +123,8 @@ static inline void mutex_lock(mutex_t *m)
 	u32 c;
 	int ret;

-	while ((c = atomic_inc(&m->raw))) {
-		ret = sys_futex(&m->raw.counter, FUTEX_WAIT, c + 1, NULL, NULL, 0);
+	while ((c = (u32)atomic_inc_return(&m->raw)) != 1) {
+		ret = sys_futex((u32 *)&m->raw.counter, FUTEX_WAIT, c, NULL, NULL, 0);
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
 	}
 }
@@ -132,8 +132,8 @@ static inline void mutex_lock(mutex_t *m)
 static inline void mutex_unlock(mutex_t *m)
 {
 	u32 c = 0;
-	atomic_set(&m->raw, c);
-	BUG_ON(sys_futex(&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
+	atomic_set(&m->raw, (int)c);
+	BUG_ON(sys_futex((u32 *)&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
 }

 #endif /* __CR_LOCK_H__ */
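
The mutex_lock() hunk is a behavioral adjustment, not just a cast: the old atomic_inc() returned the pre-increment value, so the loop tested it against 0 and waited on c + 1, while the kernel-style atomic_inc_return() yields the post-increment value, so the first locker sees 1 and takes the lock, and contenders sleep while the word still reads the c they produced. A minimal C11 model of that logic (illustrative only, not the CRIU code):

/* Toy model of the futex-backed mutex after this change. */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct { atomic_uint raw; } model_mutex_t;

static void model_lock(model_mutex_t *m)
{
	uint32_t c;

	/* inc_return: the first locker drives 0 -> 1 and owns the lock;
	 * everyone else sees c > 1 and sleeps while the word still reads c. */
	while ((c = atomic_fetch_add(&m->raw, 1) + 1) != 1)
		syscall(SYS_futex, &m->raw, FUTEX_WAIT, c, NULL, NULL, 0);
}

static void model_unlock(model_mutex_t *m)
{
	atomic_store(&m->raw, 0);	/* reset, then wake one waiter */
	syscall(SYS_futex, &m->raw, FUTEX_WAKE, 1, NULL, NULL, 0);
}

int main(void)
{
	model_mutex_t m = { 0 };
	model_lock(&m);		/* uncontended: 0 -> 1, no syscall */
	model_unlock(&m);
	return 0;
}
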
@@ -30,7 +30,7 @@ void cnt_add(int c, unsigned long val)
 		dstats->counts[c] += val;
 	} else if (rstats != NULL) {
 		BUG_ON(c >= RESTORE_CNT_NR_STATS);
-		atomic_add(&rstats->counts[c], val);
+		atomic_add(val, &rstats->counts[c]);
 	} else
 		BUG();
 }