patch-2.1.34 linux/include/asm-i386/semaphore.h


diff -u --recursive --new-file v2.1.33/linux/include/asm-i386/semaphore.h linux/include/asm-i386/semaphore.h
@@ -19,14 +19,17 @@
  *
  */
 
+#include <asm/system.h>
+#include <asm/atomic.h>
+
 struct semaphore {
-	int count;
+	atomic_t count;
 	int waking;
 	struct wait_queue * wait;
 };
 
-#define MUTEX ((struct semaphore) { 1, 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { 0, 0, NULL })
+#define MUTEX ((struct semaphore) { { 1 }, 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { { 0 }, 0, NULL })
 
 asmlinkage void __down_failed(void /* special register calling convention */);
 asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
@@ -34,6 +37,44 @@
 
 extern void __down(struct semaphore * sem);
 extern void __up(struct semaphore * sem);
+
+#define sema_init(sem, val)	atomic_set(&((sem)->count), (val))
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * but on the x86 we need an external synchronizer.
+ * Currently this is just the global interrupt lock,
+ * bah. Go for a smaller spinlock some day.
+ *
+ * (On the other hand this shouldn't be in any critical
+ * path, so..)
+ */
+static inline void wake_one_more(struct semaphore * sem)
+{
+	unsigned long flags;
+
+	save_flags(flags);
+	cli();
+	sem->waking++;
+	restore_flags(flags);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	save_flags(flags);
+	cli();
+	if (sem->waking > 0) {
+		sem->waking--;
+		ret = 1;
+	}
+	restore_flags(flags);
+	return ret;
+}
 
 /*
  * This is ugly, but we want the default case to fall through.

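With this patch the semaphore counter becomes an atomic_t (on i386 a
structure wrapping a plain int), which is why the MUTEX and MUTEX_LOCKED
initializers gain an extra level of braces and why sema_init() goes
through atomic_set().  A minimal usage sketch, assuming the down()/up()
entry points defined further down in this header (they are not part of
the hunks shown above):

#include <asm/semaphore.h>

static struct semaphore my_mutex = MUTEX;		/* count 1: free */
static struct semaphore my_gate  = MUTEX_LOCKED;	/* count 0: held */

static void example(void)
{
	struct semaphore dyn;

	sema_init(&dyn, 1);	/* equivalent to the MUTEX initializer */

	down(&my_mutex);	/* enter the critical section */
	/* ... touch the shared data ... */
	up(&my_mutex);		/* leave the critical section */
}
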
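The comment on wake_one_more()/waking_non_zero() hints at the lock-free
alternative: with load_locked/store_cond (or compare-and-swap, which the
386 does not have), the "decrement waking only if it is positive" step
can be a single atomic operation instead of a cli()-protected section.
A sketch of that idiom in C11 atomics, purely as an illustration of the
comment and not code from this patch:

#include <stdatomic.h>

static atomic_int waking;

static void wake_one_more_cas(void)
{
	atomic_fetch_add(&waking, 1);
}

static int waking_non_zero_cas(void)
{
	int w = atomic_load(&waking);

	while (w > 0) {
		/* On failure, w is reloaded with the current value
		 * and the loop retries; a weak CAS may fail spuriously. */
		if (atomic_compare_exchange_weak(&waking, &w, w - 1))
			return 1;	/* consumed one pending wakeup */
	}
	return 0;			/* no wakeup pending */
}

Either form keeps the increment and the conditional decrement atomic with
respect to each other, which is the invariant the original comment insists
on.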