patch-2.2.3 linux/include/asm-alpha/semaphore-helper.h

diff -u --recursive --new-file v2.2.2/linux/include/asm-alpha/semaphore-helper.h linux/include/asm-alpha/semaphore-helper.h
@@ -0,0 +1,154 @@
+#ifndef _ALPHA_SEMAPHORE_HELPER_H
+#define _ALPHA_SEMAPHORE_HELPER_H
+
+/*
+ * SMP- and interrupt-safe semaphore helper functions.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999 Richard Henderson
+ */
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * which we have.  Let the rest of the losers suck eggs.
+ */
+
+static inline void
+wake_one_more(struct semaphore * sem)
+{
+	atomic_inc(&sem->waking);
+}
+
+static inline int
+waking_non_zero(struct semaphore *sem)
+{
+	long ret, tmp;
+
+	/* An atomic conditional decrement.  */
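+	/* An informal C sketch of the ll/sc loop below, in the style
+	   of the "equivalent C" comment further down.  It assumes a
+	   negative waking value means no wakeup is pending:
+
+	   do {
+	       tmp = ldl_l(&sem->waking);
+	       ret = 0;
+	       if (tmp < 0)
+	           break;		// nothing pending
+	       ret = stl_c(&sem->waking, tmp - 1);
+	   } while (ret == 0);		// lost the ll/sc race; retry
+	   return ret > 0;		// 1 iff we consumed a wakeup
+	*/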
+	__asm__ __volatile__(
+		"1:	ldl_l	%1,%2\n"
+		"	blt	%1,2f\n"
+		"	subl	%1,1,%0\n"
+		"	stl_c	%0,%2\n"
+		"	beq	%0,3f\n"
+		"2:\n"
+		".section .text2,\"ax\"\n"
+		"3:	br	1b\n"
+		".previous"
+		: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
+		: "0"(0));
+
+	return ret > 0;
+}
+
+
+/*
+ * waking_non_zero_interruptible:
+ *	1	got the lock
+ *	0	go to sleep
+ *	-EINTR	interrupted
+ *
+ * We must undo the sem->count decrement made by down_interruptible
+ * simultaneously and atomically with the sem->waking adjustment,
+ * otherwise we can race with wake_one_more.
+ *
+ * This is accomplished by doing a 64-bit ll/sc on the two 32-bit words.
+ */
+
+static inline int
+waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
+{
+	long ret, tmp, tmp2, tmp3;
+
+	/* "Equivalent" C.  Note that we have to do this all without
+	   (taken) branches in order to be a valid ll/sc sequence.
+
+	   do {
+	       tmp = ldq_l;
+	       ret = 0;
+	       if (tmp >= 0) {
+	           tmp += 0xffffffff00000000;
+	           ret = 1;
+	       }
+	       else if (pending) {
+	           tmp += 1;
+	           ret = -EINTR;
+	       }
+	       else {
+		   break;	// ideally; we can't actually break,
+		   		// since that would need a branch we
+				// don't have, and it's cheaper to let
+				// the stq_c happen as a no-op.
+	       }
+	       tmp = stq_c(tmp);	// store; tmp becomes the success flag
+	   } while (tmp == 0);
+	*/
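+	/* Layout note (an assumption from semaphore.h): count precedes
+	   waking and Alpha is little-endian, so the ldq_l sees count in
+	   the low and waking in the high 32 bits.  Each ldah below adds
+	   -0x80000000; the pair subtracts 2^32, i.e. decrements waking
+	   without needing a 64-bit immediate.  */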
+
+	__asm__ __volatile__(
+		"1:	ldq_l	%1,%4\n"
+		"	lda	%0,0\n"
+		"	addq	%1,1,%2\n"
+		"	ldah	%3,0x8000(%1)\n"
+		"	cmovne	%5,%6,%0\n"
+		"	ldah	%3,0x8000(%3)\n"
+		"	cmovge	%1,1,%0\n"
+		"	cmovne	%5,%2,%1\n"
+		"	cmovge	%2,%3,%1\n"
+		"	stq_c	%1,%4\n"
+		"	beq	%1,3f\n"
+		"2:\n"
+		".section .text2,\"ax\"\n"
+		"3:	br	1b\n"
+		".previous"
+		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3), "=m"(*sem)
+		: "r"(signal_pending(tsk)), "r"(-EINTR));
+
+	return ret;
+}
+
+/*
+ * waking_non_zero_trylock is unused.  We do everything in
+ * down_trylock and let non-ll/sc hosts bounce around.
+ */
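+/*
+ * For contrast, a rough sketch of the spinlock-based helper that
+ * non-ll/sc ports use; the names (semaphore_wake_lock, flags, etc.)
+ * follow the generic 2.2 helpers and are illustrative only:
+ *
+ *	spin_lock_irqsave(&semaphore_wake_lock, flags);
+ *	if (sem->waking > 0) {
+ *		sem->waking--;			// consume a wakeup
+ *		failed = 0;			// got the lock
+ *	} else
+ *		atomic_inc(&sem->count);	// undo down_trylock's dec
+ *	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ *	return failed;			// 1: failed to lock, 0: got it
+ */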
+
+static inline int
+waking_non_zero_trylock(struct semaphore *sem)
+{
+	return 0;
+}
+
+#endif
