Commit aea8a605 authored by Cyrill Gorcunov's avatar Cyrill Gorcunov Committed by Pavel Emelyanov

atomic -- Switch to linux kernel templates

Use the same code as provided in the kernel. Originally
we used our own prototypes for the sake of simplicity
(they were all based on the "lock xadd" instruction).
There is no longer a need for that, so we can switch to
the well-known kernel API.

Because the kernel uses a plain int type to carry atomic
counters, I had to add an explicit u32 type for futexes,
as well as a couple of fixes for the new API usage.
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent 19391c85
......@@ -2,7 +2,7 @@
#define __CR_ATOMIC_H__
typedef struct {
u32 counter;
int counter;
} atomic_t;
......@@ -20,7 +20,7 @@ typedef struct {
#define atomic_set(mem,v) ((mem)->counter = (v))
#define atomic_get(v) (*(volatile u32 *)&(v)->counter)
#define atomic_get(v) (*(volatile int *)&(v)->counter)
static inline int atomic_add_return(int i, atomic_t *v)
{
......@@ -68,11 +68,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_inc(atomic_t *v) { return atomic_add_return(1, v) - 1; }
static inline int atomic_add(atomic_t *v, int val) { return atomic_add_return(val, v) - val; }
static inline int atomic_add(int val, atomic_t *v) { return atomic_add_return(val, v) - val; }
static inline int atomic_dec(atomic_t *v) { return atomic_sub_return(1, v) + 1; }
/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#endif /* __CR_ATOMIC_H__ */
#ifndef __CR_ATOMIC_H__
#define __CR_ATOMIC_H__
#include "asm/types.h"
#include "asm/cmpxchg.h"
#define LOCK_PREFIX "\n\tlock; "
typedef struct {
u32 counter;
int counter;
} atomic_t;
#define atomic_set(mem, v) \
({ \
u32 ret__ = v; \
asm volatile ("lock xchg %0, %1\n" \
: "+r" (ret__), "+m" ((mem)->counter) \
: \
: "cc", "memory"); \
})
#define atomic_get(mem) \
({ \
u32 ret__ = 0; \
asm volatile ("lock xadd %0, %1\n" \
: "+r" (ret__), "+m" ((mem)->counter) \
: \
: "cc", "memory"); \
ret__; \
})
#define atomic_add(mem, val) \
({ \
u32 ret__ = (val); \
asm volatile ("lock xadd %0, %1\n" \
: "+r" (ret__), "+m" ((mem)->counter) \
: \
: "cc", "memory"); \
ret__; \
})
#define atomic_inc(mem) atomic_add(mem, 1)
#define atomic_dec(mem) \
({ \
u32 ret__ = -1; \
asm volatile ("lock xadd %0, %1\n" \
: "+r" (ret__), "+m" ((mem)->counter) \
: \
: "cc", "memory"); \
ret__; \
})
/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(mem) \
({ \
unsigned char ret__; \
asm volatile ("lock decl %0; sete %1\n" \
: "+m" ((mem)->counter), "=qm" (ret__) \
: \
: "cc", "memory"); \
ret__ != 0; \
})
#define ATOMIC_INIT(i) { (i) }
/*
 * Read the current counter value of @a.
 * The volatile qualifier forces a real load from memory on every call,
 * preventing the compiler from caching the value in a register.
 */
static inline int atomic_read(const atomic_t *a)
{
	const volatile int *slot = &a->counter;

	return *slot;
}
/*
* FIXME Use atomic_read instead of atomic_get all over the code
*/
#define atomic_get atomic_read
/*
 * Store @val into @a's counter with a plain assignment
 * (no lock prefix and no compiler barrier).
 */
static inline void atomic_set(atomic_t *a, int val)
{
	a->counter = val;
}
/*
 * Atomically add @i to @v->counter using a LOCK-prefixed "addl".
 * Returns nothing; use atomic_add_return() when the result is needed.
 * Note: there is no "memory" clobber here, so this is not a compiler
 * barrier for other memory accesses (same as the kernel's variant).
 */
static inline void atomic_add(int i, atomic_t *v)
{
/* "+m": counter is read and written in place; "ir": immediate or register. */
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/*
 * Atomically subtract @i from @v->counter using a LOCK-prefixed "subl".
 * Returns nothing; use atomic_sub_return() when the result is needed.
 * No "memory" clobber: not a compiler barrier for surrounding accesses.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
/* "+m": counter is read and written in place; "ir": immediate or register. */
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/*
 * Atomically increment @v->counter by one (LOCK-prefixed "incl").
 * The previous/new value is not reported; see atomic_inc_return().
 */
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/*
 * Atomically decrement @v->counter by one (LOCK-prefixed "decl").
 * The previous/new value is not reported; see atomic_dec_return().
 */
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/*
 * Atomically decrement @v->counter and test the result.
 * Returns non-zero (true) iff the counter reached exactly zero,
 * false for all other values.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
/*
 * "decl" sets ZF when the result is 0; "sete" captures that flag into @c.
 * The "memory" clobber makes this a full compiler barrier, so it can be
 * used for release-style bookkeeping (e.g. last-reference detection).
 */
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/*
 * Atomically add @delta to @a's counter and return the NEW value.
 * xadd() (asm/cmpxchg.h) atomically performs the add and hands back
 * the counter's previous value, so the new value is old + delta.
 */
static inline int atomic_add_return(int delta, atomic_t *a)
{
	int old = xadd(&a->counter, delta);

	return old + delta;
}
/*
 * Atomically subtract @delta from @a's counter and return the NEW value.
 * Implemented as an add of the negated delta.
 */
static inline int atomic_sub_return(int delta, atomic_t *a)
{
	return atomic_add_return(-delta, a);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#endif /* __CR_ATOMIC_H__ */
......@@ -27,7 +27,7 @@ static inline u32 futex_get(futex_t *f)
/* Set futex @f value to @v */
static inline void futex_set(futex_t *f, u32 v)
{
atomic_set(&f->raw, v);
atomic_set(&f->raw, (int)v);
}
#define futex_init(f) futex_set(f, 0)
......@@ -39,11 +39,11 @@ static inline void futex_set(futex_t *f, u32 v)
u32 tmp; \
\
while (1) { \
tmp = atomic_get(&(__f)->raw); \
tmp = (u32)atomic_get(&(__f)->raw); \
if ((tmp & FUTEX_ABORT_FLAG) || \
(tmp __cond (__v))) \
break; \
ret = sys_futex(&(__f)->raw.counter, FUTEX_WAIT,\
ret = sys_futex((u32 *)&(__f)->raw.counter, FUTEX_WAIT,\
tmp, NULL, NULL, 0); \
BUG_ON(ret < 0 && ret != -EWOULDBLOCK); \
} \
......@@ -52,8 +52,8 @@ static inline void futex_set(futex_t *f, u32 v)
/* Set futex @f to @v and wake up all waiters */
static inline void futex_set_and_wake(futex_t *f, u32 v)
{
atomic_set(&f->raw, v);
BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
atomic_set(&f->raw, (int)v);
BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
}
/* Mark futex @f as wait abort needed and wake up all waiters */
......@@ -67,14 +67,14 @@ static inline void futex_abort_and_wake(futex_t *f)
static inline void futex_dec_and_wake(futex_t *f)
{
atomic_dec(&f->raw);
BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
}
/* Increment futex @f value and wake up all waiters */
static inline void futex_inc_and_wake(futex_t *f)
{
atomic_inc(&f->raw);
BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
}
/* Plain increment futex @f value */
......@@ -102,8 +102,8 @@ static inline void futex_wait_while_eq(futex_t *f, u32 v)
/* Wait while futex @f value is @v */
static inline void futex_wait_while(futex_t *f, u32 v)
{
while (atomic_get(&f->raw) == v) {
int ret = sys_futex(&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
while ((u32)atomic_get(&f->raw) == v) {
int ret = sys_futex((u32 *)&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
}
}
......@@ -115,7 +115,7 @@ typedef struct {
static inline void mutex_init(mutex_t *m)
{
u32 c = 0;
atomic_set(&m->raw, c);
atomic_set(&m->raw, (int)c);
}
static inline void mutex_lock(mutex_t *m)
......@@ -123,8 +123,8 @@ static inline void mutex_lock(mutex_t *m)
u32 c;
int ret;
while ((c = atomic_inc(&m->raw))) {
ret = sys_futex(&m->raw.counter, FUTEX_WAIT, c + 1, NULL, NULL, 0);
while ((c = (u32)atomic_inc_return(&m->raw)) != 1) {
ret = sys_futex((u32 *)&m->raw.counter, FUTEX_WAIT, c, NULL, NULL, 0);
BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
}
}
......@@ -132,8 +132,8 @@ static inline void mutex_lock(mutex_t *m)
static inline void mutex_unlock(mutex_t *m)
{
u32 c = 0;
atomic_set(&m->raw, c);
BUG_ON(sys_futex(&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
atomic_set(&m->raw, (int)c);
BUG_ON(sys_futex((u32 *)&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
}
#endif /* __CR_LOCK_H__ */
......@@ -30,7 +30,7 @@ void cnt_add(int c, unsigned long val)
dstats->counts[c] += val;
} else if (rstats != NULL) {
BUG_ON(c >= RESTORE_CNT_NR_STATS);
atomic_add(&rstats->counts[c], val);
atomic_add(val, &rstats->counts[c]);
} else
BUG();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment