patch-1.3.77 linux/include/asm-i386/atomic.h

diff -u --recursive --new-file v1.3.76/linux/include/asm-i386/atomic.h linux/include/asm-i386/atomic.h
@@ -0,0 +1,56 @@
+#ifndef __ARCH_I386_ATOMIC__
+#define __ARCH_I386_ATOMIC__
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ */
+
+#ifdef __SMP__
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+
+typedef int atomic_t;
+
+static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "addl %1,%0"
+		:"=m" (__atomic_fool_gcc(v))
+		:"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "subl %1,%0"
+		:"=m" (__atomic_fool_gcc(v))
+		:"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "incl %0"
+		:"=m" (__atomic_fool_gcc(v))
+		:"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "decl %0"
+		:"=m" (__atomic_fool_gcc(v))
+		:"m" (__atomic_fool_gcc(v)));
+}
+
+#endif /* __ARCH_I386_ATOMIC__ */
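
A minimal usage sketch, not part of the patch itself: the interface above is meant for resource counting, e.g. a reference count on a shared object. The resource_count, resource_get() and resource_put() names below are illustrative only.

#include <asm/atomic.h>

/* Hypothetical reference count for a shared resource.  atomic_inc()
 * and atomic_dec() each compile to a single incl/decl instruction,
 * prefixed with "lock" when __SMP__ is defined, so the
 * read-modify-write cannot be interleaved with another processor's. */
static atomic_t resource_count = 0;

void resource_get(void)
{
	atomic_inc(&resource_count);	/* one more user */
}

void resource_put(void)
{
	atomic_dec(&resource_count);	/* one user gone */
}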
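
The __atomic_fool_gcc() cast generalizes beyond this file: any asm statement that modifies memory through a pointer can use the same trick, so that gcc's "m" constraint refers to the pointed-to object itself rather than some alias of its value. A sketch under that assumption, with a hypothetical __fool_gcc() and clear_word():

/* Cast the pointer to a large dummy struct so the memory operand
 * covers the real object; gcc then cannot satisfy the constraint
 * with a cached copy or a different alias of the value. */
#define __fool_gcc(x) (*(struct { int a[100]; } *)(x))

static __inline__ void clear_word(int *p)
{
	__asm__ __volatile__(
		"movl $0,%0"
		:"=m" (__fool_gcc(p)));
}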
