patch-2.1.36 linux/include/asm-sparc/spinlock.h

diff -u --recursive --new-file v2.1.35/linux/include/asm-sparc/spinlock.h linux/include/asm-sparc/spinlock.h
@@ -16,6 +16,7 @@
 #define spin_lock_init(lock)	do { } while(0)
 #define spin_lock(lock)		do { } while(0)
 #define spin_trylock(lock)	do { } while(0)
+#define spin_unlock_wait(lock)	do { } while(0)
 #define spin_unlock(lock)	do { } while(0)
 #define spin_lock_irq(lock)	cli()
 #define spin_unlock_irq(lock)	sti()
@@ -23,6 +24,33 @@
 #define spin_lock_irqsave(lock, flags)		save_and_cli(flags)
 #define spin_unlock_irqrestore(lock, flags)	restore_flags(flags)
 
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct { } rwlock_t;
+#define RW_LOCK_UNLOCKED { }
+
+#define read_lock(lock)		do { } while(0)
+#define read_unlock(lock)	do { } while(0)
+#define write_lock(lock)	do { } while(0)
+#define write_unlock(lock)	do { } while(0)
+#define read_lock_irq(lock)	cli()
+#define read_unlock_irq(lock)	sti()
+#define write_lock_irq(lock)	cli()
+#define write_unlock_irq(lock)	sti()
+
+#define read_lock_irqsave(lock, flags)		save_and_cli(flags)
+#define read_unlock_irqrestore(lock, flags)	restore_flags(flags)
+#define write_lock_irqsave(lock, flags)		save_and_cli(flags)
+#define write_unlock_irqrestore(lock, flags)	restore_flags(flags)
+
 #else /* !(__SMP__) */
 
 #include <asm/psr.h>
@@ -30,20 +58,25 @@
 typedef unsigned char spinlock_t;
 #define SPIN_LOCK_UNLOCKED	0
 
+#define spin_lock_init(lock)	(*(lock) = 0)
+#define spin_unlock_wait(lock)	do { barrier(); } while(*(volatile spinlock_t *)lock)
+
 extern __inline__ void spin_lock(spinlock_t *lock)
 {
-	register spinlock_t *lp asm("g1");
-	lp = lock;
 	__asm__ __volatile__("
-	ldstub	[%%g1], %%g2
+1:	ldstub	[%0], %%g2
 	orcc	%%g2, 0x0, %%g0
-	be	1f
-	 mov	%%o7, %%g4
-	call	___spinlock_waitfor
-	 ldub	[%%g1], %%g2
-1:"	: /* no outputs */
-	: "r" (lp)
-	: "g2", "g4", "memory", "cc");
+	bne,a	2f
+	 ldub	[%0], %%g2
+	.text	2
+2:	orcc	%%g2, 0x0, %%g0
+	bne,a	2b
+	 ldub	[%0], %%g2
+	b,a	1b
+	.previous
+"	: /* no outputs */
+	: "r" (lock)
+	: "g2", "memory", "cc");
 }
 
 extern __inline__ int spin_trylock(spinlock_t *lock)
@@ -63,22 +96,24 @@
 
 extern __inline__ void spin_lock_irq(spinlock_t *lock)
 {
-	register spinlock_t *lp asm("g1");
-	lp = lock;
 	__asm__ __volatile__("
 	rd	%%psr, %%g2
 	or	%%g2, %0, %%g2
 	wr	%%g2, 0x0, %%psr
 	nop; nop; nop;
-	ldstub	[%%g1], %%g2
+1:	ldstub	[%1], %%g2
 	orcc	%%g2, 0x0, %%g0
-	be	1f
-	 mov	%%o7, %%g4
-	call	___spinlock_waitfor
-	 ldub	[%%g1], %%g2
-1:"	: /* No outputs */
-	: "i" (PSR_PIL), "r" (lp)
-	: "g2", "g4", "memory", "cc");
+	bne,a	2f
+	 ldub	[%1], %%g2
+	.text	2
+2:	orcc	%%g2, 0x0, %%g0
+	bne,a	2b
+	 ldub	[%1], %%g2
+	b,a	1b
+	.previous
+"	: /* No outputs */
+	: "i" (PSR_PIL), "r" (lock)
+	: "g2", "memory", "cc");
 }
 
 extern __inline__ void spin_unlock_irq(spinlock_t *lock)
@@ -102,16 +137,22 @@
 	"rd	%%psr, %0\n\t"			\
 	"or	%0, %1, %%g2\n\t"		\
 	"wr	%%g2, 0x0, %%psr\n\t"		\
-	"nop; nop; nop;\n\t"			\
-	"ldstub	[%%g1], %%g2\n\t"		\
+	"nop; nop; nop;\n"			\
+	"1:\n\t"				\
+	"ldstub	[%2], %%g2\n\t"			\
 	"orcc	%%g2, 0x0, %%g0\n\t"		\
-	"be	1f\n\t"				\
-	" mov	%%o7, %%g4\n\t"			\
-	"call	___spinlock_waitfor\n\t"	\
-	" ldub	[%%g1], %%g2\n\t"		\
-"1:"	: "=r" (flags)				\
-	: "i" (PSR_PIL), "r" (lp)		\
-	: "g2", "g4", "memory", "cc");		\
+	"bne,a	2f\n\t"				\
+	" ldub	[%2], %%g2\n\t"			\
+	".text	2\n"				\
+	"2:\n\t"				\
+	"orcc	%%g2, 0x0, %%g0\n\t"		\
+	"bne,a	2b\n\t"				\
+	" ldub	[%2], %%g2\n\t"			\
+	"b,a	1b\n\t"				\
+	".previous\n"				\
+	: "=r" (flags)				\
+	: "i" (PSR_PIL), "r" (lock)		\
+	: "g2", "memory", "cc");		\
 } while(0)
 
 extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
@@ -124,6 +165,92 @@
 	: "r" (lock), "r" (flags)
 	: "memory", "cc");
 }
+
+/* Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * XXX This might create some problems with my dual spinlock
+ * XXX scheme, deadlocks etc. -DaveM
+ */
+typedef struct { volatile unsigned int lock; } rwlock_t;
+
+#define RW_LOCK_UNLOCKED { 0 }
+
+/* Sort of like atomic_t's on Sparc, but even more clever.
+ *
+ *	------------------------------------
+ *	| 16-bit counter   | clock | wlock |  rwlock_t
+ *	------------------------------------
+ *	 31              16 15    8 7     0
+ *
+ * wlock signifies the one writer is in, the clock protects
+ * counter bumping, however a reader must acquire wlock
+ * before he can bump the counter on a read_lock().
+ * Similarly a writer, once he has the wlock, must await
+ * for the top 24 bits to all clear before he can finish
+ * going in (this includes the clock of course).
+ *
+ * Unfortunately this scheme limits us to ~65,000 cpus.
+ */
+extern __inline__ void read_lock(rwlock_t *rw)
+{
+	register rwlock_t *lp asm("g1");
+	lp = rw;
+	__asm__ __volatile__("
+	mov	%%o7, %%g4
+	call	___rw_read_enter
+	 ldstub	[%%g1 + 3], %%g2
+"	: /* no outputs */
+	: "r" (lp)
+	: "g2", "g4", "g7", "memory", "cc");
+}
+
+extern __inline__ void read_unlock(rwlock_t *rw)
+{
+	register rwlock_t *lp asm("g1");
+	lp = rw;
+	__asm__ __volatile__("
+	mov	%%o7, %%g4
+	call	___rw_read_exit
+	 ldstub	[%%g1 + 2], %%g2
+"	: /* no outputs */
+	: "r" (lp)
+	: "g2", "g4", "g7", "memory", "cc");
+}
+
+extern __inline__ void write_lock(rwlock_t *rw)
+{
+	register rwlock_t *lp asm("g1");
+	lp = rw;
+	__asm__ __volatile__("
+	mov	%%o7, %%g4
+	call	___rw_write_enter
+	 ldstub	[%%g1 + 3], %%g2
+"	: /* no outputs */
+	: "r" (lp)
+	: "g2", "g4", "g7", "memory", "cc");
+}
+
+#define write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define read_lock_irq(lock)	do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock)	do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock)	do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock)	do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags)	\
+	do { __save_and_cli(flags); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+	do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags)	\
+	do { __save_and_cli(flags); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+	do { write_unlock(lock); __restore_flags(flags); } while (0)
 
 #endif /* __SMP__ */
 

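A few illustrative notes on the changes above follow; none of the code below
is part of the patch itself.

In spin_lock() and spin_lock_irq(), the call to the out-of-line helper
___spinlock_waitfor is replaced by an inline loop whose contended path is
pushed into a separate text section (the ".text 2" / ".previous" pair).  The
spin loop therefore no longer needs the %o7/%g4 return-address shuffling, and
the lock pointer no longer has to be forced into %g1.  The control flow is
roughly the following test-and-test-and-set loop; ldstub_model() is a plain-C
stand-in for SPARC's ldstub and is NOT atomic, it only shows the shape of the
loop:

/* Non-atomic model of ldstub: return the old byte, store 0xff into it. */
static unsigned char ldstub_model(volatile unsigned char *p)
{
	unsigned char old = *p;		/* illustration only, not atomic */
	*p = 0xff;
	return old;
}

static void spin_lock_sketch(volatile unsigned char *lock)
{
	while (ldstub_model(lock) != 0) {
		/* Contended path (kept out of line by ".text 2"): spin with
		 * ordinary loads until the lock looks free, then retry the
		 * atomic test-and-set. */
		while (*lock != 0)
			/* spin */;
	}
}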
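The comment introducing the read-write locks describes a "mixed" discipline:
when readers may run in interrupt context but writers never do, every writer
must take the irq-safe write lock, while readers may keep using the plain,
non-irq-safe read lock.  A minimal usage sketch, with a hypothetical
stats_lock and counter chosen purely for illustration:

/* Hypothetical data guarded by an rwlock, showing the mixed irq-safety
 * rule from the comment in the patch. */
static rwlock_t stats_lock = RW_LOCK_UNLOCKED;
static unsigned long packets_seen;

/* Readers, whether in process or interrupt context, may use the plain,
 * non-irq-safe read lock. */
static unsigned long read_packets_seen(void)
{
	unsigned long n;

	read_lock(&stats_lock);
	n = packets_seen;
	read_unlock(&stats_lock);
	return n;
}

/* Writers must disable interrupts: otherwise an interrupt-context reader
 * on the same CPU could spin forever against the write lock held by the
 * interrupted code. */
static void bump_packets_seen(void)
{
	unsigned long flags;

	write_lock_irqsave(&stats_lock, flags);
	packets_seen++;
	write_unlock_irqrestore(&stats_lock, flags);
}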
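The layout comment for the SMP rwlock_t packs three fields into one 32-bit
word.  On big-endian SPARC, byte offset 3 of that word is the low byte (the
wlock field, bits 7..0) and offset 2 is the clock byte (bits 15..8), which is
why read_lock() and write_lock() ldstub the byte at offset 3 while
read_unlock() ldstubs the byte at offset 2 before handing off to the
out-of-line helpers (___rw_read_enter and friends, which are not part of this
file).  The layout can be restated with a few hypothetical accessor macros,
again only for illustration:

/* Hypothetical accessors restating the rwlock_t field layout; the real
 * code manipulates these fields in out-of-line assembly. */
#define RW_READER_COUNT(rw)	(((rw)->lock >> 16) & 0xffff)	/* bits 31..16: reader count */
#define RW_CLOCK(rw)		(((rw)->lock >>  8) & 0xff)	/* bits 15..8:  counter lock */
#define RW_WLOCK(rw)		(((rw)->lock >>  0) & 0xff)	/* bits 7..0:   writer lock  */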