patch-2.2.16 linux/drivers/net/acenic.c


diff -urN v2.2.15/linux/drivers/net/acenic.c linux/drivers/net/acenic.c
@@ -2,7 +2,7 @@
  * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
  *           and other Tigon based cards.
  *
- * Copyright 1998, 1999 by Jes Sorensen, <Jes.Sorensen@cern.ch>.
+ * Copyright 1998-2000 by Jes Sorensen, <Jes.Sorensen@cern.ch>.
  *
  * Thanks to Alteon and 3Com for providing hardware and documentation
  * enabling me to write this driver.
@@ -17,14 +17,24 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  *
- * Additional work by Pete Wyckoff <wyckoff@ca.sandia.gov> for initial
- * Alpha and trace dump support.
+ * Additional credits:
+ *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
+ *       dump support. The trace dump support has not been
+ *       integrated yet however.
+ *   Troy Benjegerdes: Big Endian (PPC) patches.
+ *   Nate Stahl: Better out of memory handling and stats support.
+ *   Aman Singla: Nasty race between interrupt handler and tx code dealing
+ *                with 'testing the tx_ret_csm and setting tx_full'
+ *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
+ *                                       infrastructure and Sparc support
+ *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
+ *                              driver under Linux/Sparc64
+ *   Matt Domsch <Matt_Domsch@dell.com>: Detect 1000baseT cards
  */
 
-#define PKT_COPY_THRESHOLD 300
-
+#include <linux/config.h>
 #include <linux/module.h>
-
+#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -36,6 +46,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+
+#undef ETHTOOL
+#undef INDEX_DEBUG
+
+#ifdef ETHTOOL
+#include <linux/ethtool.h>
+#endif
 #include <net/sock.h>
 #include <net/ip.h>
 
@@ -45,20 +62,19 @@
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#include "acenic.h"
-
-/*
- * These must be defined before the firmware is included.
- */
-#define MAX_TEXT_LEN	96*1024
-#define MAX_RODATA_LEN	8*1024
-#define MAX_DATA_LEN	2*1024
 
-#include "acenic_firmware.h"
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+#define ACE_IS_TIGON_I(ap)	0
+#else
+#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
+#endif
 
 #ifndef PCI_VENDOR_ID_ALTEON
 #define PCI_VENDOR_ID_ALTEON		0x12ae	
-#define PCI_DEVICE_ID_ALTEON_ACENIC	0x0001
+#endif
+#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
+#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
+#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
 #endif
 #ifndef PCI_DEVICE_ID_3COM_3C985
 #define PCI_DEVICE_ID_3COM_3C985	0x0001
@@ -71,18 +87,107 @@
  * They used the DEC vendor ID by mistake
  */
 #ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
-#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
+#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
 #endif
 #ifndef PCI_VENDOR_ID_SGI
-#define PCI_VENDOR_ID_SGI             0x10a9
-#define PCI_DEVICE_ID_SGI_ACENIC      0x0009
+#define PCI_VENDOR_ID_SGI		0x10a9
+#endif
+#ifndef PCI_DEVICE_ID_SGI_ACENIC
+#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
+#endif
+
+#ifndef wmb
+#define wmb()	mb()
+#endif
+
+#ifndef __exit
+#define __exit
+#endif
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES	L1_CACHE_BYTES
+#endif
+
+
+#if (LINUX_VERSION_CODE < 0x02030d)
+#define pci_resource_start(dev, bar)	dev->base_address[bar]
+#elif (LINUX_VERSION_CODE < 0x02032c)
+#define pci_resource_start(dev, bar)	dev->resource[bar].start
+#endif
+
+#if (LINUX_VERSION_CODE < 0x02030e)
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x02031b)
+#define NEW_NETINIT
+#endif
+
+#if (LINUX_VERSION_CODE < 0x02032a)
+typedef u32 dma_addr_t;
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+					 dma_addr_t *dma_handle)
+{
+	void *virt_ptr;
+
+	virt_ptr = kmalloc(size, GFP_KERNEL);
+	*dma_handle = virt_to_bus(virt_ptr);
+	return virt_ptr;
+}
+#define pci_free_consistent(cookie, size, ptr, dma_ptr)	kfree(ptr)
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x02032b)
+/*
+ * SoftNet
+ */
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev)	clear_bit(0, &dev->tbusy)
+#define netif_stop_queue(dev)	set_bit(0, &dev->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define ace_mark_net_bh(foo)		mark_bh(foo)
+#define netif_queue_stopped(dev)	dev->tbusy
+#define netif_running(dev)		dev->start
+#define ace_if_down(dev)		{do{dev->start = 0;}while (0);}
+#else
+#define NET_BH			0
+#define ace_mark_net_bh(foo)	{do{} while(0);}
+#define ace_if_down(dev)	{do{} while(0);}
 #endif
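
Annotation (not part of the diff): the hexadecimal LINUX_VERSION_CODE
cut-offs in the compatibility block above pack the kernel version as
(major << 16) | (minor << 8) | patchlevel. A throw-away helper - the
name ACE_KVER is purely illustrative - makes the constants readable:

	#define ACE_KVER(maj, min, pl)	(((maj) << 16) | ((min) << 8) | (pl))

	/* ACE_KVER(2, 3, 13) == 0x02030d  pci_resource_start() fallback    */
	/* ACE_KVER(2, 3, 42) == 0x02032a  pci_alloc_consistent() fallback   */
	/* ACE_KVER(2, 3, 43) == 0x02032b  SoftNet netif_*_queue() fallbacks */
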
 
+
+#define ACE_MAX_MOD_PARMS	8
+#define BOARD_IDX_STATIC	0
+#define BOARD_IDX_OVERFLOW	-1
+
+
+#include "acenic.h"
+
+/*
+ * These must be defined before the firmware is included.
+ */
+#define MAX_TEXT_LEN	96*1024
+#define MAX_RODATA_LEN	8*1024
+#define MAX_DATA_LEN	2*1024
+
+#include "acenic_firmware.h"
+
 /*
  * This driver currently supports Tigon I and Tigon II based cards
- * including the Alteon AceNIC and the 3Com 3C985. The driver should
- * also work on the NetGear GA620, however I have not been able to
- * test that myself.
+ * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
+ * GA620. The driver should also work on the SGI, DEC and Farallon
+ * versions of the card, however I have not been able to test that
+ * myself.
  *
  * This card is really neat, it supports receive hardware checksumming
  * and jumbo frames (up to 9000 bytes) and does a lot of work in the
@@ -147,6 +252,12 @@
  *                is available, on the 1/2MB NIC app. 300KB is available.
  *                68KB will always be available as a minimum for both
  *                directions. The default value is a 50/50 split.
+ *  dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
+ *                operations, default (1) is to always disable this as
+ *                that is what Alteon does on NT. I have not been able
+ *                to measure any real performance differences with
+ *                this on my systems. Set <val>=0 if you want to
+ *                enable these operations.
  *
  * If you use more than one NIC, specify the parameters for the
  * individual NICs with a comma, ie. trace=0,0x00001fff,0 you want to
@@ -160,46 +271,146 @@
  *
  * The mini ring is not used under Linux and I am not sure it makes sense
  * to actually use it.
+ *
+ * New interrupt handler strategy:
+ *
+ * The old interrupt handler worked using the traditional method of
+ * replacing an skbuff with a new one when a packet arrives. However
+ * the rx rings do not need to contain a static number of buffer
+ * descriptors, thus it makes sense to move the memory allocation out
+ * of the main interrupt handler and do it in a bottom half handler
+ * and only allocate new buffers when the number of buffers in the
+ * ring is below a certain threshold. In order to avoid starving the
+ * NIC under heavy load it is however necessary to force allocation
+ * when hitting a minimum threshold. The strategy for allocation is as
+ * follows:
+ *
+ *     RX_LOW_BUF_THRES    - allocate buffers in the bottom half
+ *     RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
+ *                           the buffers in the interrupt handler
+ *     RX_RING_THRES       - maximum number of buffers in the rx ring
+ *     RX_MINI_THRES       - maximum number of buffers in the mini ring
+ *     RX_JUMBO_THRES      - maximum number of buffers in the jumbo ring
+ *
+ * One advantageous side effect of this allocation approach is that the
+ * entire rx processing can be done without holding any spin lock
+ * since the rx rings and registers are totally independent of the tx
+ * ring and its registers.  This of course includes the kmalloc's of
+ * new skb's. Thus start_xmit can run in parallel with rx processing
+ * and the memory allocation on SMP systems.
+ *
+ * Note that running the skb reallocation in a bottom half opens up
+ * another can of races which needs to be handled properly. In
+ * particular it can happen that the interrupt handler tries to run
+ * the reallocation while the bottom half is either running on another
+ * CPU or was interrupted on the same CPU. To get around this the
+ * driver uses bitops to prevent the reallocation routines from being
+ * reentered.
+ *
+ * TX handling can also be done without holding any spin lock, wheee
+ * this is fun! since tx_ret_csm is only written to by the interrupt
+ * handler. The case to be aware of is when shutting down the device
+ * and cleaning up where it is necessary to make sure that
+ * start_xmit() is not running while this is happening. Well DaveM
+ * informs me that this case is already protected against ... bye bye
+ * Mr. Spin Lock, it was nice to know you.
+ *
+ * TX interrupts are now partly disabled so the NIC will only generate
+ * TX interrupts for the number of coal ticks, not for the number of
+ * TX packets in the queue. This should reduce the number of TX only,
+ * ie. when no RX processing is done, interrupts seen.
  */
 
 /*
- * Default values for tuning parameters
+ * Threshold values for RX buffer allocation - the low water marks for
+ * when to start refilling the rings are set to 75% of the ring
+ * sizes. It seems to make sense to refill the rings entirely from the
+ * interrupt handler once it gets below the panic threshold, that way
+ * we don't risk that the refilling is moved to another CPU when the
+ * one running the interrupt handler just got the slab code hot in its
+ * cache.
  */
-#define DEF_TX_RATIO	31
-#define DEF_TX_COAL	1000
-#define DEF_TX_MAX_DESC	40
-#define DEF_RX_COAL	1000
-#define DEF_RX_MAX_DESC	20
-#define DEF_TRACE	0
-#define DEF_STAT	2 * TICKS_PER_SEC
-
-static int link[8] = {0, };
-static int trace[8] = {0, };
-static int tx_coal_tick[8] = {0, };
-static int rx_coal_tick[8] = {0, };
-static int max_tx_desc[8] = {0, };
-static int max_rx_desc[8] = {0, };
-static int tx_ratio[8] = {0, };
+#define RX_RING_SIZE		72
+#define RX_MINI_SIZE		64
+#define RX_JUMBO_SIZE		48
+
+#define RX_PANIC_STD_THRES	16
+#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
+#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
+#define RX_PANIC_MINI_THRES	12
+#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
+#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
+#define RX_PANIC_JUMBO_THRES	6
+#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
+#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
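
Annotation (not part of the diff): with the ring sizes above, the
integer arithmetic in these thresholds works out to:

	RX_LOW_STD_THRES    = (3 * 72) / 4 = 54   refill std ring from the bh
	RX_LOW_MINI_THRES   = (3 * 64) / 4 = 48   refill mini ring from the bh
	RX_LOW_JUMBO_THRES  = (3 * 48) / 4 = 36   refill jumbo ring from the bh
	RX_PANIC_STD_REFILL = (3 * 16) / 2 = 24   refill directly from the irq handler
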
 
-static const char __initdata *version = "acenic.c: v0.33a 08/16/99  Jes Sorensen (Jes.Sorensen@cern.ch)\n";
 
-static struct device *root_dev = NULL;
+/*
+ * Size of the mini ring entries, basically these just should be big
+ * enough to take TCP ACKs
+ */
+#define ACE_MINI_SIZE		100
+
+#define ACE_MINI_BUFSIZE	(ACE_MINI_SIZE + 2 + 16)
+#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 2+4+16)
+#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
+
+/*
+ * There seems to be a magic difference in the effect between 995 and 996
+ * but little difference between 900 and 995 ... no idea why.
+ *
+ * There is now a default set of tuning parameters which is set, depending
+ * on whether or not the user enables Jumbo frames. It's assumed that if
+ * Jumbo frames are enabled, the user wants optimal tuning for that case.
+ */
+#define DEF_TX_COAL		400 /* 996 */
+#define DEF_TX_MAX_DESC		40
+#define DEF_RX_COAL		120 /* 1000 */
+#define DEF_RX_MAX_DESC		25
+#define DEF_TX_RATIO		21 /* 24 */
+
+#define DEF_JUMBO_TX_COAL	20
+#define DEF_JUMBO_TX_MAX_DESC	60
+#define DEF_JUMBO_RX_COAL	30
+#define DEF_JUMBO_RX_MAX_DESC	6
+#define DEF_JUMBO_TX_RATIO	21
+
+#define TX_COAL_INTS_ONLY	0	/* seems not worth it */
+#define DEF_TRACE		0
+#define DEF_STAT		(2 * TICKS_PER_SEC)
+
+static int link[ACE_MAX_MOD_PARMS] = {0, };
+static int trace[ACE_MAX_MOD_PARMS] = {0, };
+static int tx_coal_tick[ACE_MAX_MOD_PARMS] = {0, };
+static int rx_coal_tick[ACE_MAX_MOD_PARMS] = {0, };
+static int max_tx_desc[ACE_MAX_MOD_PARMS] = {0, };
+static int max_rx_desc[ACE_MAX_MOD_PARMS] = {0, };
+static int tx_ratio[ACE_MAX_MOD_PARMS] = {0, };
+static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
+
+static const char __initdata *version = 
+  "acenic.c: v0.44 05/11/2000  Jes Sorensen, linux-acenic@SunSITE.auc.dk\n"
+  "                            http://home.cern.ch/~jes/gige/acenic.html\n";
+
+static struct net_device *root_dev = NULL;
 
 static int probed __initdata = 0;
 
-__initfunc(int acenic_probe (struct device *dev))
+
+#ifdef NEW_NETINIT
+int __init acenic_probe (void)
+#else
+int __init acenic_probe (struct net_device *dev)
+#endif
 {
-	int boards_found = 0;
-	int version_disp;
-	struct ace_private *ap;
-	u8 pci_latency;
-#if 0
-	u16 vendor, device;
-	u8 pci_bus;
-	u8 pci_dev_fun;
-	u8 irq;
+#ifdef NEW_NETINIT
+	struct net_device *dev;
 #endif
+
+	struct ace_private *ap;
 	struct pci_dev *pdev = NULL;
+	int boards_found = 0;
+	int version_disp;
 
 	if (probed)
 		return -ENODEV;
@@ -210,11 +421,11 @@
 
 	version_disp = 0;
 
-	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET<<8, pdev))){
-		dev = NULL;
+	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {
 
 		if (!((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
-		      (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC)) &&
+		      ((pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE) ||
+		       (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_COPPER)) ) &&
 		    !((pdev->vendor == PCI_VENDOR_ID_3COM) &&
 		      (pdev->device == PCI_DEVICE_ID_3COM_3C985)) &&
 		    !((pdev->vendor == PCI_VENDOR_ID_NETGEAR) &&
@@ -229,44 +440,34 @@
 		      (pdev->device == PCI_DEVICE_ID_SGI_ACENIC)))
 			continue;
 
-		dev = init_etherdev(dev, sizeof(struct ace_private));
+		dev = init_etherdev(NULL, sizeof(struct ace_private));
 
-		if (dev == NULL){
-			printk(KERN_ERR "Unable to allocate etherdev "
-			       "structure!\n");
+		if (dev == NULL) {
+			printk(KERN_ERR "acenic: Unable to allocate "
+			       "net_device structure!\n");
 			break;
 		}
 
 		if (!dev->priv)
 			dev->priv = kmalloc(sizeof(*ap), GFP_KERNEL);
-		if (!dev->priv)
+		if (!dev->priv) {
+			printk(KERN_ERR "acenic: Unable to allocate memory\n");
 			return -ENOMEM;
+		}
 
 		ap = dev->priv;
 		ap->pdev = pdev;
-		ap->vendor = pdev->vendor;
 
 		dev->irq = pdev->irq;
-#ifdef __SMP__
-		spin_lock_init(&ap->lock);
-#endif
-
 		dev->open = &ace_open;
 		dev->hard_start_xmit = &ace_start_xmit;
 		dev->stop = &ace_close;
 		dev->get_stats = &ace_get_stats;
 		dev->set_multicast_list = &ace_set_multicast_list;
-#if 0
 		dev->do_ioctl = &ace_ioctl;
-#endif
 		dev->set_mac_address = &ace_set_mac_addr;
 		dev->change_mtu = &ace_change_mtu;
 
-		/*
-		 * Dummy value.
-		 */
-		dev->base_addr = 42;
-
 		/* display version info if adapter is found */
 		if (!version_disp)
 		{
@@ -278,76 +479,109 @@
 
 		pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
 
-		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
-		if (pci_latency <= 0x40){
-			pci_latency = 0x40;
+		/* OpenFirmware on Mac's does not set this - DOH.. */ 
+		if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
+			printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
+			       "access - was not enabled by BIOS/Firmware\n",
+			       dev->name);
+			ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
+			pci_write_config_word(ap->pdev, PCI_COMMAND,
+					      ap->pci_command);
+			wmb();
+		}
+
+		pci_read_config_byte(pdev, PCI_LATENCY_TIMER,
+				     &ap->pci_latency);
+		if (ap->pci_latency <= 0x40) {
+			ap->pci_latency = 0x40;
 			pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
-					      pci_latency);
+					      ap->pci_latency);
 		}
 
 		pci_set_master(pdev);
 
-		switch(ap->vendor){
+		/*
+		 * Remap the regs into kernel space - this is abuse of
+		 * dev->base_addr since it was meant for I/O port
+		 * addresses but who gives a damn.
+		 */
+		dev->base_addr = pci_resource_start(pdev, 0);
+		ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
+		if (!ap->regs) {
+			printk(KERN_ERR "%s:  Unable to map I/O register, "
+			       "AceNIC %i will be disabled.\n",
+			       dev->name, boards_found);
+			break;
+		}
+
+		switch(pdev->vendor) {
 		case PCI_VENDOR_ID_ALTEON:
-			sprintf(ap->name, "AceNIC Gigabit Ethernet");
+			strncpy(ap->name, "AceNIC Gigabit Ethernet",
+				sizeof (ap->name));
 			printk(KERN_INFO "%s: Alteon AceNIC ", dev->name);
 			break;
 		case PCI_VENDOR_ID_3COM:
-			sprintf(ap->name, "3Com 3C985 Gigabit Ethernet");
+			strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
+				sizeof (ap->name));
 			printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
 			break;
 		case PCI_VENDOR_ID_NETGEAR:
-			sprintf(ap->name, "NetGear GA620 Gigabit Ethernet");
+			strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
+				sizeof (ap->name));
 			printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
 			break;
 		case PCI_VENDOR_ID_DEC:
 			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
-				sprintf(ap->name, "Farallon PN9000-SX "
-					"Gigabit Ethernet");
+				strncpy(ap->name, "Farallon PN9000-SX "
+					"Gigabit Ethernet", sizeof (ap->name));
 				printk(KERN_INFO "%s: Farallon PN9000-SX ",
 				       dev->name);
 				break;
 			}
 		case PCI_VENDOR_ID_SGI:
-			sprintf(ap->name, "SGI AceNIC Gigabit Ethernet");
+			strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
+				sizeof (ap->name));
 			printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
 			break;
 		default:
-			sprintf(ap->name, "Unknown AceNIC based Gigabit Ethernet");
+ 			strncpy(ap->name, "Unknown AceNIC based Gigabit "
+				"Ethernet", sizeof (ap->name));
 			printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
 			break;
 		}
-		printk("Gigabit Ethernet at 0x%08lx, irq %i, PCI latency %i "
-		       "clks\n", pdev->base_address[0], dev->irq, pci_latency);
-
-		/*
-		 * Remap the regs into kernel space.
-		 */
+		ap->name [sizeof (ap->name) - 1] = '\0';
+		printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
+#ifdef __sparc__
+		printk("irq %s\n", __irq_itoa(dev->irq));
+#else
+		printk("irq %i\n", dev->irq);
+#endif
 
-		ap->regs = (struct ace_regs *)ioremap(pdev->base_address[0],
-						      0x4000);
-		if (!ap->regs){
-			printk(KERN_ERR "%s:  Unable to map I/O register, "
-			       "AceNIC %i will be disabled.\n",
-			       dev->name, boards_found);
-			break;
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+		if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
+			printk(KERN_ERR "%s: Driver compiled without Tigon I"
+			       " support - NIC disabled\n", dev->name);
+			ace_init_cleanup(dev);
+			continue;
 		}
+#endif
 
-#ifdef MODULE
-		if (ace_init(dev, boards_found))
+		if (ace_allocate_descriptors(dev))
 			continue;
+
+#ifdef MODULE
+		if (boards_found >= ACE_MAX_MOD_PARMS)
+			ap->board_idx = BOARD_IDX_OVERFLOW;
+		else
+			ap->board_idx = boards_found;
 #else
-		if (ace_init(dev, -1))
-			continue;
+		ap->board_idx = BOARD_IDX_STATIC;
 #endif
 
-		boards_found++;
+		if (ace_init(dev))
+			continue;
 
-		/*
-		 * This is bollocks, but we need to tell the net-init
-		 * code that it shall go for the next device.
-		 */
-		dev->base_addr = 0;
+		boards_found++;
 	}
 
 	/*
@@ -356,21 +590,16 @@
 	 * or more boards. Otherwise, return failure (-ENODEV).
 	 */
 
-#ifdef MODULE
-	return boards_found;
-#else
 	if (boards_found > 0)
 		return 0;
 	else
 		return -ENODEV;
-#endif
 }
 
 
 #ifdef MODULE
-#if LINUX_VERSION_CODE > 0x20118
 MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@cern.ch>");
-MODULE_DESCRIPTION("AceNIC/3C985 Gigabit Ethernet driver");
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
 MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
 MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
 MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
@@ -379,64 +608,252 @@
 MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
 #endif
 
-int init_module(void)
-{
-	int cards;
-
-	root_dev = NULL;
-
-	cards = acenic_probe(NULL);
-	return cards ? 0 : -ENODEV;
-}
 
-void cleanup_module(void)
+void __exit ace_module_cleanup(void)
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct device *next;
+	struct net_device *next;
 	short i;
-	unsigned long flags;
 
-	while (root_dev){
+	while (root_dev) {
 		next = ((struct ace_private *)root_dev->priv)->next;
-		ap = (struct ace_private *)root_dev->priv;
+		ap = root_dev->priv;
 
 		regs = ap->regs;
-		spin_lock_irqsave(&ap->lock, flags);
 
 		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
-		if (ap->version == 2)
+		if (ap->version >= 2)
 			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
 			       &regs->CpuBCtrl);
-		writel(0, &regs->Mb0Lo);
-
-		spin_unlock_irqrestore(&ap->lock, flags);
+		/*
+		 * This clears any pending interrupts
+		 */
+		writel(1, &regs->Mb0Lo);
 
 		/*
-		 * Release the RX buffers.
+		 * Make sure no other CPUs are processing interrupts
+		 * on the card before the buffers are being released.
+		 * Otherwise one might experience some `interesting'
+		 * effects.
+		 *
+		 * Then release the RX buffers - jumbo buffers were
+		 * already released in ace_close().
 		 */
+		synchronize_irq();
+
 		for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
-			if (ap->rx_std_skbuff[i]) {
+			struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
+
+			if (skb) {
+				dma_addr_t mapping;
+
+				mapping = ap->skb->rx_std_skbuff[i].mapping;
+
 				ap->rx_std_ring[i].size = 0;
-				set_aceaddr_bus(&ap->rx_std_ring[i].addr, 0);
-				dev_kfree_skb(ap->rx_std_skbuff[i]);
+				ap->skb->rx_std_skbuff[i].skb = NULL;
+				pci_unmap_single(ap->pdev, mapping,
+						 ACE_STD_BUFSIZE - (2 + 16),
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(skb);
+			}
+		}
+		if (ap->version >= 2) {
+			for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
+				struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
+
+				if (skb) {
+					dma_addr_t mapping;
+
+					mapping = ap->skb->rx_mini_skbuff[i].mapping;
+					ap->rx_mini_ring[i].size = 0;
+					ap->skb->rx_mini_skbuff[i].skb = NULL;
+					pci_unmap_single(ap->pdev, mapping,
+							 ACE_MINI_BUFSIZE - (2 + 16),
+							 PCI_DMA_FROMDEVICE);
+					dev_kfree_skb(skb);
+				}
 			}
 		}
+		for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+			struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
+			if (skb) {
+				dma_addr_t mapping;
 
-		iounmap(regs);
-		if(ap->trace_buf)
-			kfree(ap->trace_buf);
-		kfree(ap->info);
-		free_irq(root_dev->irq, root_dev);
-		unregister_netdev(root_dev);
-		kfree(root_dev);
+				mapping = ap->skb->rx_jumbo_skbuff[i].mapping;
 
+				ap->rx_jumbo_ring[i].size = 0;
+				ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+				pci_unmap_single(ap->pdev, mapping,
+						 ACE_JUMBO_BUFSIZE - (2 + 16),
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(skb);
+			}
+		}
+
+		ace_init_cleanup(root_dev);
+		kfree(root_dev);
 		root_dev = next;
 	}
 }
+
+
+int __init ace_module_init(void)
+{
+	int status;
+
+	root_dev = NULL;
+
+#ifdef NEW_NETINIT
+	status = acenic_probe();
+#else
+	status = acenic_probe(NULL);
+#endif
+	return status;
+}
+
+
+#if (LINUX_VERSION_CODE < 0x02032a)
+int init_module(void)
+{
+	return ace_module_init();
+}
+
+
+void cleanup_module(void)
+{
+	ace_module_cleanup();
+}
+#else
+module_init(ace_module_init);
+module_exit(ace_module_cleanup);
 #endif
 
 
+static void ace_free_descriptors(struct net_device *dev)
+{
+	struct ace_private *ap = dev->priv;
+	int size;
+
+	if (ap->rx_std_ring != NULL) {
+		size = (sizeof(struct rx_desc) *
+			(RX_STD_RING_ENTRIES +
+			 RX_JUMBO_RING_ENTRIES +
+			 RX_MINI_RING_ENTRIES +
+			 RX_RETURN_RING_ENTRIES));
+		pci_free_consistent(ap->pdev, size,
+				    ap->rx_std_ring,
+				    ap->rx_ring_base_dma);
+		ap->rx_std_ring = NULL;
+		ap->rx_jumbo_ring = NULL;
+		ap->rx_mini_ring = NULL;
+		ap->rx_return_ring = NULL;
+	}
+	if (ap->evt_ring != NULL) {
+		size = (sizeof(struct event) * EVT_RING_ENTRIES);
+		pci_free_consistent(ap->pdev, size,
+				    ap->evt_ring,
+				    ap->evt_ring_dma);
+		ap->evt_ring = NULL;
+	}
+	if (ap->evt_prd != NULL) {
+		pci_free_consistent(ap->pdev, sizeof(u32),
+				    (void *)ap->evt_prd, ap->evt_prd_dma);
+		ap->evt_prd = NULL;
+	}
+	if (ap->rx_ret_prd != NULL) {
+		pci_free_consistent(ap->pdev, sizeof(u32),
+				    (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
+		ap->rx_ret_prd = NULL;
+	}
+	if (ap->tx_csm != NULL) {
+		pci_free_consistent(ap->pdev, sizeof(u32),
+				    (void *)ap->tx_csm, ap->tx_csm_dma);
+		ap->tx_csm = NULL;
+	}
+}
+
+
+static int ace_allocate_descriptors(struct net_device *dev)
+{
+	struct ace_private *ap = dev->priv;
+	int size;
+
+	size = (sizeof(struct rx_desc) *
+		(RX_STD_RING_ENTRIES +
+		 RX_JUMBO_RING_ENTRIES +
+		 RX_MINI_RING_ENTRIES +
+		 RX_RETURN_RING_ENTRIES));
+
+	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
+					       &ap->rx_ring_base_dma);
+	if (ap->rx_std_ring == NULL)
+		goto fail;
+
+	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
+	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
+	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
+
+	size = (sizeof(struct event) * EVT_RING_ENTRIES);
+
+	ap->evt_ring = pci_alloc_consistent(ap->pdev, size,
+					    &ap->evt_ring_dma);
+
+	if (ap->evt_ring == NULL)
+		goto fail;
+
+	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+					   &ap->evt_prd_dma);
+	if (ap->evt_prd == NULL)
+		goto fail;
+
+	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+					      &ap->rx_ret_prd_dma);
+	if (ap->rx_ret_prd == NULL)
+		goto fail;
+
+	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
+					  &ap->tx_csm_dma);
+	if (ap->tx_csm == NULL)
+		goto fail;
+
+	return 0;
+
+fail:
+	/* Clean up. */
+	ace_init_cleanup(dev);
+	return 1;
+}
+
+
+/*
+ * Generic cleanup handling data allocated during init. Used when the
+ * module is unloaded or if an error occurs during initialization
+ */
+static void ace_init_cleanup(struct net_device *dev)
+{
+	struct ace_private *ap;
+
+	ap = dev->priv;
+
+	ace_free_descriptors(dev);
+
+	if (ap->info)
+		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
+				    ap->info, ap->info_dma);
+	if (ap->skb)
+		kfree(ap->skb);
+	if (ap->trace_buf)
+		kfree(ap->trace_buf);
+
+	if (dev->irq)
+		free_irq(dev->irq, dev);
+
+	unregister_netdev(dev);
+	iounmap(ap->regs);
+}
+
+
 /*
  * Commands are considered to be slow.
  */
@@ -453,24 +870,40 @@
 }
 
 
-__initfunc(static int ace_init(struct device *dev, int board_idx))
+static int __init ace_init(struct net_device *dev)
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct ace_info *info;
-	u32 tig_ver, mac1, mac2, tmp;
+	struct ace_info *info = NULL;
 	unsigned long tmp_ptr, myjif;
+	u32 tig_ver, mac1, mac2, tmp, pci_state;
+	int board_idx, ecode = 0;
 	short i;
+	unsigned char cache;
 
 	ap = dev->priv;
 	regs = ap->regs;
 
+	board_idx = ap->board_idx;
+
+	/*
+	 * aman@sgi.com - its useful to do a NIC reset here to
+	 * address the `Firmware not running' problem subsequent
+	 * to any crashes involving the NIC
+	 */
+	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
+	wmb();
+
 	/*
 	 * Don't access any other registes before this point!
 	 */
 #ifdef __BIG_ENDIAN
-	writel(((BYTE_SWAP | WORD_SWAP | CLR_INT) |
-		((BYTE_SWAP | WORD_SWAP | CLR_INT) << 24)),
+	/*
+	 * This will most likely need BYTE_SWAP once we switch
+	 * to using __raw_writel()
+	 */
+	writel((WORD_SWAP | CLR_INT |
+		((WORD_SWAP | CLR_INT) << 24)),
 	       &regs->HostCtrl);
 #else
 	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
@@ -487,18 +920,25 @@
 	tig_ver = readl(&regs->HostCtrl) >> 28;
 
 	switch(tig_ver){
+#ifndef CONFIG_ACENIC_OMIT_TIGON_I
 	case 4:
-		printk(KERN_INFO"  Tigon I (Rev. 4), Firmware: %i.%i.%i, ",
+		printk(KERN_INFO"  Tigon I  (Rev. 4), Firmware: %i.%i.%i, ",
 		       tigonFwReleaseMajor, tigonFwReleaseMinor,
 		       tigonFwReleaseFix);
 		writel(0, &regs->LocalCtrl);
 		ap->version = 1;
 		break;
+#endif
 	case 6:
 		printk(KERN_INFO"  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
 		       tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
 		       tigon2FwReleaseFix);
 		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+		/*
+		 * The SRAM bank size does _not_ indicate the amount
+		 * of memory on the card, it controls the _bank_ size!
+		 * Ie. a 1MB AceNIC will have two banks of 512KB.
+		 */
 		writel(SRAM_BANK_512K, &regs->LocalCtrl);
 		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
 		ap->version = 2;
@@ -506,7 +946,8 @@
 	default:
 		printk(KERN_INFO"  Unsupported Tigon version detected (%i), ",
 		       tig_ver);
-		return -ENODEV;
+		ecode = -ENODEV;
+		goto init_error;
 	}
 
 	/*
@@ -516,22 +957,34 @@
 	 * value a second time works as well. This is what caused the
 	 * `Firmware not running' problem on the Tigon II.
 	 */
-#ifdef __LITTLE_ENDIAN
-	writel(ACE_BYTE_SWAP_DATA | ACE_WARN | ACE_FATAL |
-	       ACE_WORD_SWAP | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#ifdef __BIG_ENDIAN
+	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
+	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
 #else
-#error "this driver doesn't run on big-endian machines yet!"
+	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
+	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
 #endif
+	mb();
 
 	mac1 = 0;
-	for(i = 0; i < 4; i++){
+	for(i = 0; i < 4; i++) {
 		mac1 = mac1 << 8;
-		mac1 |= read_eeprom_byte(regs, 0x8c+i);
+		tmp = read_eeprom_byte(dev, 0x8c+i);
+		if ((int)tmp < 0) {
+			ecode = -EIO;
+			goto init_error;
+		} else
+			mac1 |= (tmp & 0xff);
 	}
 	mac2 = 0;
-	for(i = 4; i < 8; i++){
+	for(i = 4; i < 8; i++) {
 		mac2 = mac2 << 8;
-		mac2 |= read_eeprom_byte(regs, 0x8c+i);
+		tmp = read_eeprom_byte(dev, 0x8c+i);
+		if ((int)tmp < 0) {
+			ecode = -EIO;
+			goto init_error;
+		} else
+			mac2 |= (tmp & 0xff);
 	}
 
 	writel(mac1, &regs->MacAddrHi);
@@ -549,24 +1002,57 @@
 	dev->dev_addr[5] = mac2 & 0xff;
 
 	/*
+	 * Looks like this is necessary to deal with on all architectures,
+	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
+	 * Ie. having two NICs in the machine, one will have the cache
+	 * line set at boot time, the other will not.
+	 */
+	pci_read_config_byte(ap->pdev, PCI_CACHE_LINE_SIZE, &cache);
+	if ((cache << 2) != SMP_CACHE_BYTES) {
+		printk(KERN_INFO "  PCI cache line size set incorrectly "
+		       "(%i bytes) by BIOS/FW, correcting to %i\n",
+		       (cache << 2), SMP_CACHE_BYTES);
+		pci_write_config_byte(ap->pdev, PCI_CACHE_LINE_SIZE,
+				      SMP_CACHE_BYTES >> 2);
+	}
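
Annotation (not part of the diff): the PCI cache line size register
holds the line size in 32-bit words, hence the shifts above. For a
machine with SMP_CACHE_BYTES == 32 the values are:

	register contents (dwords):   cache                =  8
	converted to bytes:           cache << 2           = 32
	value written back (dwords):  SMP_CACHE_BYTES >> 2 =  8
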
+
+	pci_state = readl(&regs->PciState);
+	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
+	       "latency: %i clks\n",
+	       	(pci_state & PCI_32BIT) ? 32 : 64,
+		(pci_state & PCI_66MHZ) ? 66 : 33, 
+		ap->pci_latency);
+
+	/*
 	 * Set the max DMA transfer size. Seems that for most systems
 	 * the performance is better when no MAX parameter is
 	 * set. However for systems enabling PCI write and invalidate,
 	 * DMA writes must be set to the L1 cache line size to get
 	 * optimal performance.
+	 *
+	 * The default is now to turn the PCI write and invalidate off
+	 * - that is what Alteon does for NT.
 	 */
 	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
-	if (ap->version == 2){
-#if 0
+	if (ap->version >= 2) {
+		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
 		/*
-		 * According to the documentation this enables writes
-		 * to all PCI regs - NOT good.
+		 * Tuning parameters only supported for 8 cards
 		 */
-		tmp |= DMA_WRITE_ALL_ALIGN;
-#endif
-		tmp |= MEM_READ_MULTIPLE;
-		if (ap->pci_command & PCI_COMMAND_INVALIDATE){
-			switch(L1_CACHE_BYTES){
+		if (board_idx == BOARD_IDX_OVERFLOW ||
+		    dis_pci_mem_inval[board_idx]) {
+			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
+				pci_write_config_word(ap->pdev, PCI_COMMAND,
+						      ap->pci_command);
+				printk(KERN_INFO "  Disabling PCI memory "
+				       "write and invalidate\n");
+			}
+		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+			printk(KERN_INFO "  PCI memory write & invalidate "
+			       "enabled by BIOS, enabling counter measures\n");
+			
+			switch(SMP_CACHE_BYTES) {
 			case 16:
 				tmp |= DMA_WRITE_MAX_16;
 				break;
@@ -586,22 +1072,62 @@
 			}
 		}
 	}
+
+#ifdef __sparc__
+	/*
+	 * On this platform, we know what the best dma settings
+	 * are.  We use 64-byte maximum bursts, because if we
+	 * burst larger than the cache line size (or even cross
+	 * a 64byte boundry in a single burst) the UltraSparc
+	 * PCI controller will disconnect at 64-byte multiples.
+	 *
+	 * Read-multiple will be properly enabled above, and when
+	 * set will give the PCI controller proper hints about
+	 * prefetching.
+	 */
+	tmp = tmp & ~DMA_READ_WRITE_MASK;
+	tmp |= DMA_READ_MAX_64;
+	tmp |= DMA_WRITE_MAX_64;
+#endif
 	writel(tmp, &regs->PciState);
 
-	if (request_irq(dev->irq, ace_interrupt, SA_SHIRQ, ap->name, dev)) {
-		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
-		       dev->name, dev->irq);
-		return -EAGAIN;
+#if 0
+	/*
+	 * I have received reports from people having problems when this
+	 * bit is enabled.
+	 */
+	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
+		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
+		ap->pci_command |= PCI_COMMAND_FAST_BACK;
+		pci_write_config_word(ap->pdev, PCI_COMMAND, ap->pci_command);
 	}
-
+#endif
+		
 	/*
 	 * Initialize the generic info block and the command+event rings
 	 * and the control blocks for the transmit and receive rings
 	 * as they need to be setup once and for all.
 	 */
-	if (!(info = kmalloc(sizeof(struct ace_info), GFP_KERNEL))){
-		free_irq(dev->irq, dev);
-		return -EAGAIN;
+	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
+				    &ap->info_dma))) {
+		ecode = -EAGAIN;
+		goto init_error;
+	}
+	ap->info = info;
+
+	/*
+	 * Get the memory for the skb rings.
+	 */
+	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
+		ecode = -EAGAIN;
+		goto init_error;
+	}
+
+	if (request_irq(dev->irq, ace_interrupt, SA_SHIRQ, ap->name, dev)) {
+		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+		       dev->name, dev->irq);
+		ecode = -EAGAIN;
+		goto init_error;
 	}
 
 	/*
@@ -611,13 +1137,20 @@
 	ap->next = root_dev;
 	root_dev = dev;
 
-	ap->info = info;
-	memset(info, 0, sizeof(struct ace_info));
+#ifdef INDEX_DEBUG
+	spin_lock_init(&ap->debug_lock);
+	ap->last_tx = TX_RING_ENTRIES - 1;
+	ap->last_std_rx = 0;
+	ap->last_mini_rx = 0;
+#endif
+
+	memset(ap->info, 0, sizeof(struct ace_info));
+	memset(ap->skb, 0, sizeof(struct ace_skb));
 
 	ace_load_firmware(dev);
 	ap->fw_running = 0;
 
-	tmp_ptr = virt_to_bus((void *)info);
+	tmp_ptr = (unsigned long) ap->info_dma;
 #if (BITS_PER_LONG == 64)
 	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
 #else
@@ -627,15 +1160,16 @@
 
 	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
 
-	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring);
+	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
 	info->evt_ctrl.flags = 0;
 
-	set_aceaddr(&info->evt_prd_ptr, &ap->evt_prd);
-	ap->evt_prd = 0;
+	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
+	*(ap->evt_prd) = 0;
+	wmb();
 	writel(0, &regs->EvtCsm);
 
+	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
 	info->cmd_ctrl.flags = 0;
-	set_aceaddr_bus(&info->cmd_ctrl.rngptr, (void *)0x100);
 	info->cmd_ctrl.max_len = 0;
 
 	for (i = 0; i < CMD_RING_ENTRIES; i++)
@@ -644,55 +1178,92 @@
 	writel(0, &regs->CmdPrd);
 	writel(0, &regs->CmdCsm);
 
-	set_aceaddr(&info->stats2_ptr, &info->s.stats);
+	tmp_ptr = ap->info_dma;
+	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
+	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
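
Annotation (not part of the diff): the pointer arithmetic above is a
hand-rolled offsetof() - casting a null pointer and taking the address
of s.stats yields its byte offset inside struct ace_info. With the
standard macro the same computation could be written roughly as:

	set_aceaddr(&info->stats2_ptr,
		    ap->info_dma + offsetof(struct ace_info, s.stats));
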
 
+	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
 	info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
-	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_std_ring);
-	info->rx_std_ctrl.flags = FLG_RX_TCP_UDP_SUM;
+	info->rx_std_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
 
 	memset(ap->rx_std_ring, 0,
 	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
 
+	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
+		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
+
+	ap->rx_std_skbprd = 0;
+	atomic_set(&ap->cur_rx_bufs, 0);
+
+	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
+		    (ap->rx_ring_base_dma +
+		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
 	info->rx_jumbo_ctrl.max_len = 0;
-	set_aceaddr(&info->rx_jumbo_ctrl.rngptr, ap->rx_jumbo_ring);
-	info->rx_jumbo_ctrl.flags = FLG_RX_TCP_UDP_SUM;
+	info->rx_jumbo_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
 
 	memset(ap->rx_jumbo_ring, 0,
 	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
 
-	info->rx_mini_ctrl.max_len = 0;
-#if 0
-	set_aceaddr(&info->rx_mini_ctrl.rngptr, ap->rx_mini_ring);
-#else
-	set_aceaddr_bus(&info->rx_mini_ctrl.rngptr, 0);
-#endif
-	info->rx_mini_ctrl.flags = FLG_RNG_DISABLED;
+	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
+		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
+
+	ap->rx_jumbo_skbprd = 0;
+	atomic_set(&ap->cur_jumbo_bufs, 0);
 
-#if 0
 	memset(ap->rx_mini_ring, 0,
 	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
-#endif
 
-	set_aceaddr(&info->rx_return_ctrl.rngptr, ap->rx_return_ring);
+	if (ap->version >= 2) {
+		set_aceaddr(&info->rx_mini_ctrl.rngptr,
+			    (ap->rx_ring_base_dma +
+			     (sizeof(struct rx_desc) *
+			      (RX_STD_RING_ENTRIES +
+			       RX_JUMBO_RING_ENTRIES))));
+		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
+		info->rx_mini_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
+
+		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
+			ap->rx_mini_ring[i].flags =
+				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
+	} else {
+		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
+		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
+		info->rx_mini_ctrl.max_len = 0;
+	}
+
+	ap->rx_mini_skbprd = 0;
+	atomic_set(&ap->cur_mini_bufs, 0);
+
+	set_aceaddr(&info->rx_return_ctrl.rngptr,
+		    (ap->rx_ring_base_dma +
+		     (sizeof(struct rx_desc) *
+		      (RX_STD_RING_ENTRIES +
+		       RX_JUMBO_RING_ENTRIES +
+		       RX_MINI_RING_ENTRIES))));
 	info->rx_return_ctrl.flags = 0;
 	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
 
 	memset(ap->rx_return_ring, 0,
 	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
 
-	set_aceaddr(&info->rx_ret_prd_ptr, &ap->rx_ret_prd);
+	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
+	*(ap->rx_ret_prd) = 0;
 
 	writel(TX_RING_BASE, &regs->WinBase);
 	ap->tx_ring = (struct tx_desc *)regs->Window;
-	for (i = 0; i < (TX_RING_ENTRIES * sizeof(struct tx_desc) / 4); i++){
+	for (i = 0; i < (TX_RING_ENTRIES * sizeof(struct tx_desc) / 4); i++) {
 		writel(0, (unsigned long)ap->tx_ring + i * 4);
 	}
 
+	set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
 	info->tx_ctrl.max_len = TX_RING_ENTRIES;
+#if TX_COAL_INTS_ONLY
+	info->tx_ctrl.flags = RCB_FLG_COAL_INT_ONLY;
+#else
 	info->tx_ctrl.flags = 0;
-	set_aceaddr_bus(&info->tx_ctrl.rngptr, (void *)TX_RING_BASE);
+#endif
 
-	set_aceaddr(&info->tx_csm_ptr, &ap->tx_csm);
+	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
 
 	/*
 	 * Potential item for tuning parameter
@@ -705,21 +1276,15 @@
 	writel(1, &regs->AssistState);
 
 	writel(DEF_STAT, &regs->TuneStatTicks);
-
-	writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
-	writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
-	writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
-	writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
 	writel(DEF_TRACE, &regs->TuneTrace);
-	writel(DEF_TX_RATIO, &regs->TxBufRat);
 
-	if (board_idx >= 8) {
-		printk(KERN_WARNING "%s: more then 8 NICs detected, "
-		       "ignoring module parameters!\n", dev->name);
-		board_idx = -1;
-	}
+	ace_set_rxtx_parms(dev, 0);
 
-	if (board_idx >= 0) {
+	if (board_idx == BOARD_IDX_OVERFLOW) {
+		printk(KERN_WARNING "%s: more than %i NICs detected, "
+		       "ignoring module parameters!\n",
+		       dev->name, ACE_MAX_MOD_PARMS);
+	} else if (board_idx >= 0) {
 		if (tx_coal_tick[board_idx])
 			writel(tx_coal_tick[board_idx],
 			       &regs->TuneTxCoalTicks);
@@ -744,7 +1309,7 @@
 	 */
 	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
 		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
-	if(ap->version == 2)
+	if(ap->version >= 2)
 		tmp |= LNK_TX_FLOW_CTL_Y;
 
 	/*
@@ -755,7 +1320,7 @@
 
 		tmp = LNK_ENABLE;
 
-		if (option & 0x01){
+		if (option & 0x01) {
 			printk(KERN_INFO "%s: Setting half duplex link\n",
 			       dev->name);
 			tmp &= ~LNK_FULL_DUPLEX;
@@ -768,7 +1333,7 @@
 			tmp |= LNK_100MB;
 		if (option & 0x40)
 			tmp |= LNK_1000MB;
-		if ((option & 0x70) == 0){
+		if ((option & 0x70) == 0) {
 			printk(KERN_WARNING "%s: No media speed specified, "
 			       "forcing auto negotiation\n", dev->name);
 			tmp |= LNK_NEGOTIATE | LNK_1000MB |
@@ -781,28 +1346,42 @@
 			       "negotiation\n", dev->name);
 		if (option & 0x200)
 			tmp |= LNK_RX_FLOW_CTL_Y;
-		if ((option & 0x400) && (ap->version == 2)){
+		if ((option & 0x400) && (ap->version >= 2)) {
 			printk(KERN_INFO "%s: Enabling TX flow control\n",
 			       dev->name);
 			tmp |= LNK_TX_FLOW_CTL_Y;
 		}
 	}
 
+	ap->link = tmp;
 	writel(tmp, &regs->TuneLink);
-	if (ap->version == 2)
+	if (ap->version >= 2)
 		writel(tmp, &regs->TuneFastLink);
 
-	if (ap->version == 1)
+	if (ACE_IS_TIGON_I(ap))
 		writel(tigonFwStartAddr, &regs->Pc);
-	else if (ap->version == 2)
+	if (ap->version == 2)
 		writel(tigon2FwStartAddr, &regs->Pc);
 
 	writel(0, &regs->Mb0Lo);
 
 	/*
-	 * Start the NIC CPU
+	 * Set tx_csm before we start receiving interrupts, otherwise
+	 * the interrupt handler might think it is supposed to process
+	 * tx ints before we are up and running, which may cause a null
+	 * pointer access in the int handler.
 	 */
+	ap->tx_full = 0;
+	ap->cur_rx = 0;
+	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
 
+	wmb();
+	ace_set_txprd(regs, ap, 0);
+	writel(0, &regs->RxRetCsm);
+
+	/*
+	 * Start the NIC CPU
+	 */
 	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
 
 	/*
@@ -810,20 +1389,95 @@
 	 */
 	myjif = jiffies + 3 * HZ;
 	while (time_before(jiffies, myjif) && !ap->fw_running);
-	if (!ap->fw_running){
+
+	if (!ap->fw_running) {
 		printk(KERN_ERR "%s: Firmware NOT running!\n", dev->name);
+
 		ace_dump_trace(ap);
 		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
-		return -EBUSY;
+
+		/* aman@sgi.com - account for badly behaving firmware/NIC:
+		 * - have observed that the NIC may continue to generate
+		 *   interrupts for some reason; attempt to stop it - halt
+		 *   second CPU for Tigon II cards, and also clear Mb0
+		 * - if we're a module, we'll fail to load if this was
+		 *   the only GbE card in the system => if the kernel does
+		 *   see an interrupt from the NIC, code to handle it is
+		 *   gone and OOps! - so free_irq also
+		 */
+		if (ap->version >= 2)
+			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
+			       &regs->CpuBCtrl);
+		writel(0, &regs->Mb0Lo);
+
+		ecode = -EBUSY;
+		goto init_error;
 	}
 
 	/*
 	 * We load the ring here as there seem to be no way to tell the
 	 * firmware to wipe the ring without re-initializing it.
 	 */
-	ace_load_std_rx_ring(dev);
-
+	if (!test_and_set_bit(0, &ap->std_refill_busy))
+		ace_load_std_rx_ring(ap, RX_RING_SIZE);
+	else
+		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
+		       dev->name);
+	if (ap->version >= 2) {
+		if (!test_and_set_bit(0, &ap->mini_refill_busy))
+			ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
+		else
+			printk(KERN_ERR "%s: Someone is busy refilling "
+			       "the RX mini ring\n", dev->name);
+	}
 	return 0;
+
+ init_error:
+	ace_init_cleanup(dev);
+	return ecode;
+}
+
+
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
+{
+	struct ace_private *ap;
+	struct ace_regs *regs;
+	int board_idx;
+
+	ap = dev->priv;
+	regs = ap->regs;
+
+	board_idx = ap->board_idx;
+
+	if (board_idx >= 0) {
+		if (!jumbo) {
+			if (!tx_coal_tick[board_idx])
+				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
+			if (!max_tx_desc[board_idx])
+				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
+			if (!rx_coal_tick[board_idx])
+				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
+			if (!max_rx_desc[board_idx])
+				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
+			if (!tx_ratio[board_idx])
+				writel(DEF_TX_RATIO, &regs->TxBufRat);
+		} else {
+			if (!tx_coal_tick[board_idx])
+				writel(DEF_JUMBO_TX_COAL,
+				       &regs->TuneTxCoalTicks);
+			if (!max_tx_desc[board_idx])
+				writel(DEF_JUMBO_TX_MAX_DESC,
+				       &regs->TuneMaxTxDesc);
+			if (!rx_coal_tick[board_idx])
+				writel(DEF_JUMBO_RX_COAL,
+				       &regs->TuneRxCoalTicks);
+			if (!max_rx_desc[board_idx])
+				writel(DEF_JUMBO_RX_MAX_DESC,
+				       &regs->TuneMaxRxDesc);
+			if (!tx_ratio[board_idx])
+				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
+		}
+	}
 }
 
 
@@ -832,8 +1486,8 @@
  */
 static void ace_timer(unsigned long data)
 {
-	struct device *dev = (struct device *)data;
-	struct ace_private *ap = (struct ace_private *)dev->priv;
+	struct net_device *dev = (struct net_device *)data;
+	struct ace_private *ap = dev->priv;
 	struct ace_regs *regs = ap->regs;
 
 	/*
@@ -841,7 +1495,7 @@
 	 * seconds and there is data in the transmit queue, thus we
 	 * asume the card is stuck.
 	 */
-	if (ap->tx_csm != ap->tx_ret_csm){
+	if (*ap->tx_csm != ap->tx_ret_csm) {
 		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
 		       dev->name, (unsigned int)readl(&regs->HostCtrl));
 	}
@@ -851,6 +1505,44 @@
 }
 
 
+static void ace_bh(struct net_device *dev)
+{
+	struct ace_private *ap = dev->priv;
+	int cur_size;
+
+	cur_size = atomic_read(&ap->cur_rx_bufs);
+	if ((cur_size < RX_LOW_STD_THRES) &&
+	    !test_and_set_bit(0, &ap->std_refill_busy)) {
+#if DEBUG
+		printk("refilling buffers (current %i)\n", cur_size);
+#endif
+		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+	}
+
+	if (ap->version >= 2) {
+		cur_size = atomic_read(&ap->cur_mini_bufs);
+		if ((cur_size < RX_LOW_MINI_THRES) &&
+		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
+#if DEBUG
+			printk("refilling mini buffers (current %i)\n",
+			       cur_size);
+#endif
+			ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+		}
+	}
+
+	cur_size = atomic_read(&ap->cur_jumbo_bufs);
+	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
+	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
+#if DEBUG
+			printk("refilling jumbo buffers (current %i)\n", cur_size);
+#endif
+		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+	}
+	ap->bh_pending = 0;
+}
+
+
 /*
  * Copy the contents of the NIC's trace buffer to kernel memory.
  */
@@ -866,160 +1558,191 @@
 
 /*
  * Load the standard rx ring.
+ *
+ * Loading rings is safe without holding the spin lock since this is
+ * done either before the device is enabled, thus no interrupts are
+ * generated, or later by the interrupt handler/bh handler themselves.
  */
-static int ace_load_std_rx_ring(struct device *dev)
+static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
 {
-	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct ace_info *info;
-	unsigned long flags;
-	struct cmd cmd;
-	short i;
+	short i, idx;
 
-	ap = (struct ace_private *)dev->priv;
 	regs = ap->regs;
-	info = ap->info;
 
-	spin_lock_irqsave(&ap->lock, flags);
+	idx = ap->rx_std_skbprd;
 
-	/*
-	 * Set tx_csm before we start receiving interrupts, otherwise
-	 * the interrupt handler might think it is supposed to process
-	 * tx ints before we are up and running, which may cause a null
-	 * pointer access in the int handler.
-	 */
-	ap->tx_full = 0;
-	ap->cur_rx = ap->dirty_rx = 0;
-	ap->tx_prd = ap->tx_csm = ap->tx_ret_csm = 0;
-	writel(0, &regs->RxRetCsm);
-
-	for (i = 0; i < RX_RING_THRESH; i++) {
+	for (i = 0; i < nr_bufs; i++) {
 		struct sk_buff *skb;
+		struct rx_desc *rd;
+		dma_addr_t mapping;
 
-		ap->rx_std_ring[i].flags = 0;
-		skb = alloc_skb(ACE_STD_MTU + ETH_HLEN + 6, GFP_ATOMIC);
-		ap->rx_std_skbuff[i] = skb;
+		skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
+		if (!skb)
+			break;
 
 		/*
-		 * Make sure the data contents end up on an aligned address
+		 * Make sure IP header starts on a fresh cache line.
 		 */
-		skb_reserve(skb, 2);
-
-		set_aceaddr(&ap->rx_std_ring[i].addr, skb->data);
-		ap->rx_std_ring[i].size = ACE_STD_MTU + ETH_HLEN + 4;
-
-		ap->rx_std_ring[i].flags = 0;
-		ap->rx_std_ring[i].type = DESC_RX;
-
-		ap->rx_std_ring[i].idx = i;
-	}
-
-	ap->rx_std_skbprd = i;
-
-	/*
-	 * The last descriptor needs to be marked as being special.
-	 */
-	ap->rx_std_ring[i-1].type = DESC_END;
-
-	cmd.evt = C_SET_RX_PRD_IDX;
-	cmd.code = 0;
-	cmd.idx = ap->rx_std_skbprd;
-	ace_issue_cmd(regs, &cmd);
+		skb_reserve(skb, 2 + 16);
+		mapping = pci_map_single(ap->pdev, skb->data,
+					 ACE_STD_BUFSIZE - (2 + 16),
+					 PCI_DMA_FROMDEVICE);
+		ap->skb->rx_std_skbuff[idx].skb = skb;
+		ap->skb->rx_std_skbuff[idx].mapping = mapping;
+
+		rd = &ap->rx_std_ring[idx];
+		set_aceaddr(&rd->addr, mapping);
+		rd->size = ACE_STD_MTU + ETH_HLEN + 4;
+		rd->idx = idx;
+		idx = (idx + 1) % RX_STD_RING_ENTRIES;
+	}
+
+	if (!i)
+		goto error_out;
+
+	atomic_add(i, &ap->cur_rx_bufs);
+	ap->rx_std_skbprd = idx;
+
+	if (ACE_IS_TIGON_I(ap)) {
+		struct cmd cmd;
+		cmd.evt = C_SET_RX_PRD_IDX;
+		cmd.code = 0;
+		cmd.idx = ap->rx_std_skbprd;
+		ace_issue_cmd(regs, &cmd);
+	} else {
+		writel(idx, &regs->RxStdPrd);
+		wmb();
+	}
 
-	spin_unlock_irqrestore(&ap->lock, flags);
+ out:
+	clear_bit(0, &ap->std_refill_busy);
+	return;
 
-	return 0;
+ error_out:
+	printk(KERN_INFO "Out of memory when allocating "
+	       "standard receive buffers\n");
+	goto out;
 }
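
Annotation (not part of the diff): the 2 + 16 byte reservation used in
the loop above works out as follows, assuming alloc_skb() hands back
16-byte aligned data (the common case):

	skb->data                       offset  0   (16-byte aligned)
	after skb_reserve(skb, 2 + 16)  offset 18   Ethernet header starts here
	+ ETH_HLEN (14 bytes)           offset 32   IP header starts here

so the IP header is 4-byte aligned and begins at the start of a fresh
16-byte (or 32-byte) cache line, which is what the "fresh cache line"
comments refer to.
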
 
 
-/*
- * Load the jumbo rx ring, this may happen at any time if the MTU
- * is changed to a value > 1500.
- */
-static int ace_load_jumbo_rx_ring(struct device *dev)
+static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
 {
-	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct cmd cmd;
-	unsigned long flags;
-	short i;
+	short i, idx;
 
-	ap = (struct ace_private *)dev->priv;
 	regs = ap->regs;
 
-	spin_lock_irqsave(&ap->lock, flags);
-
-	for (i = 0; i < RX_RING_JUMBO_THRESH; i++) {
+	idx = ap->rx_mini_skbprd;
+	for (i = 0; i < nr_bufs; i++) {
 		struct sk_buff *skb;
+		struct rx_desc *rd;
+		dma_addr_t mapping;
 
-		ap->rx_jumbo_ring[i].flags = 0;
-		skb = alloc_skb(ACE_JUMBO_MTU + ETH_HLEN + 6, GFP_ATOMIC);
-		ap->rx_jumbo_skbuff[i] = skb;
+		skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
+		if (!skb)
+			break;
 
 		/*
-		 * Make sure the data contents end up on an aligned address
+		 * Make sure the IP header ends up on a fresh cache line
 		 */
-		skb_reserve(skb, 2);
-
-		set_aceaddr(&ap->rx_jumbo_ring[i].addr, skb->data);
-		ap->rx_jumbo_ring[i].size = ACE_JUMBO_MTU + ETH_HLEN + 4;
+		skb_reserve(skb, 2 + 16);
+		mapping = pci_map_single(ap->pdev, skb->data,
+					 ACE_MINI_BUFSIZE - (2 + 16),
+					 PCI_DMA_FROMDEVICE);
+		ap->skb->rx_mini_skbuff[idx].skb = skb;
+		ap->skb->rx_mini_skbuff[idx].mapping = mapping;
 
-		ap->rx_jumbo_ring[i].flags = DFLG_RX_JUMBO;
-		ap->rx_jumbo_ring[i].type = DESC_RX;
-
-		ap->rx_jumbo_ring[i].idx = i;
+		rd = &ap->rx_mini_ring[idx];
+		set_aceaddr(&rd->addr, mapping);
+		rd->size = ACE_MINI_SIZE;
+		rd->idx = idx;
+		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
 	}
 
-	ap->rx_jumbo_skbprd = i;
+	if (!i)
+		goto error_out;
 
-	/*
-	 * The last descriptor needs to be marked as being special.
-	 */
-	ap->rx_jumbo_ring[i-1].type = DESC_END;
+	atomic_add(i, &ap->cur_mini_bufs);
 
-	cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
-	cmd.code = 0;
-	cmd.idx = ap->rx_jumbo_skbprd;
-	ace_issue_cmd(regs, &cmd);
+	ap->rx_mini_skbprd = idx;
 
-	spin_unlock_irqrestore(&ap->lock, flags);
+	writel(idx, &regs->RxMiniPrd);
+	wmb();
 
-	return 0;
+ out:
+	clear_bit(0, &ap->mini_refill_busy);
+	return;
+ error_out:
+	printk(KERN_INFO "Out of memory when allocating "
+	       "mini receive buffers\n");
+	goto out;
 }
 
 
 /*
- * Tell the firmware not to accept jumbos and flush the jumbo ring.
- * This function must be called with the spinlock held.
+ * Load the jumbo rx ring, this may happen at any time if the MTU
+ * is changed to a value > 1500.
  */
-static int ace_flush_jumbo_rx_ring(struct device *dev)
+static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
 {
-	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct cmd cmd;
-	short i;
+	short i, idx;
 
-	ap = (struct ace_private *)dev->priv;
 	regs = ap->regs;
 
-	if (ap->jumbo){
-		cmd.evt = C_RESET_JUMBO_RNG;
+	idx = ap->rx_jumbo_skbprd;
+
+	for (i = 0; i < nr_bufs; i++) {
+		struct sk_buff *skb;
+		struct rx_desc *rd;
+		dma_addr_t mapping;
+
+		skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
+		if (!skb)
+			break;
+
+		/*
+		 * Make sure the IP header ends up on a fresh cache line
+		 */
+		skb_reserve(skb, 2 + 16);
+		mapping = pci_map_single(ap->pdev, skb->data,
+					 ACE_JUMBO_BUFSIZE - (2 + 16),
+					 PCI_DMA_FROMDEVICE);
+		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
+		ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;
+
+		rd = &ap->rx_jumbo_ring[idx];
+		set_aceaddr(&rd->addr, mapping);
+		rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
+		rd->idx = idx;
+		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
+	}
+
+	if (!i)
+		goto error_out;
+
+	atomic_add(i, &ap->cur_jumbo_bufs);
+	ap->rx_jumbo_skbprd = idx;
+
+	if (ACE_IS_TIGON_I(ap)) {
+		struct cmd cmd;
+		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
 		cmd.code = 0;
-		cmd.idx = 0;
+		cmd.idx = ap->rx_jumbo_skbprd;
 		ace_issue_cmd(regs, &cmd);
+	} else {
+		writel(idx, &regs->RxJumboPrd);
+		wmb();
+	}
 
-		for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
-			if (ap->rx_jumbo_skbuff[i]) {
-				ap->rx_jumbo_ring[i].size = 0;
-				set_aceaddr_bus(&ap->rx_jumbo_ring[i].addr, 0);
-				dev_kfree_skb(ap->rx_jumbo_skbuff[i]);
-			}
-		}
-	}else
-		printk(KERN_ERR "%s: Trying to flush Jumbo ring without "
-		       "Jumbo support enabled\n", dev->name);
-
-	return 0;
+ out:
+	clear_bit(0, &ap->jumbo_refill_busy);
+	return;
+ error_out:
+	printk(KERN_INFO "Out of memory when allocating "
+	       "jumbo receive buffers\n");
+	goto out;
 }
 
 
@@ -1028,37 +1751,46 @@
  * events) and are handled here, outside the main interrupt handler,
  * to reduce the size of the handler.
  */
-static u32 ace_handle_event(struct device *dev, u32 evtcsm, u32 evtprd)
+static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
 {
 	struct ace_private *ap;
 
-	ap = (struct ace_private *)dev->priv;
+	ap = dev->priv;
 
-	while (evtcsm != evtprd){
-		switch (ap->evt_ring[evtcsm].evt){
+	while (evtcsm != evtprd) {
+		switch (ap->evt_ring[evtcsm].evt) {
 		case E_FW_RUNNING:
 			printk(KERN_INFO "%s: Firmware up and running\n",
 			       dev->name);
 			ap->fw_running = 1;
+			wmb();
 			break;
 		case E_STATS_UPDATED:
 			break;
 		case E_LNK_STATE:
 		{
 			u16 code = ap->evt_ring[evtcsm].code;
-			if (code == E_C_LINK_UP){
-				printk("%s: Optical link UP\n", dev->name);
-			}
-			else if (code == E_C_LINK_DOWN)
-				printk(KERN_INFO "%s: Optical link DOWN\n",
+			switch (code) {
+			case E_C_LINK_UP:
+				printk(KERN_WARNING "%s: Optical link UP\n",
 				       dev->name);
-			else
-				printk(KERN_INFO "%s: Unknown optical link "
+				break;
+			case E_C_LINK_DOWN:
+				printk(KERN_WARNING "%s: Optical link DOWN\n",
+				       dev->name);
+				break;
+			case E_C_LINK_10_100:
+				printk(KERN_WARNING "%s: 10/100BaseT link "
+				       "UP\n", dev->name);
+				break;
+			default:
+				printk(KERN_ERR "%s: Unknown optical link "
 				       "state %02x\n", dev->name, code);
+			}
 			break;
 		}
 		case E_ERROR:
-			switch(ap->evt_ring[evtcsm].code){
+			switch(ap->evt_ring[evtcsm].code) {
 			case E_C_ERR_INVAL_CMD:
 				printk(KERN_ERR "%s: invalid command error\n",
 				       dev->name);
@@ -1077,7 +1809,24 @@
 			}
 			break;
 		case E_RESET_JUMBO_RNG:
+		{
+			int i;
+			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+				if (ap->skb->rx_jumbo_skbuff[i].skb) {
+					ap->rx_jumbo_ring[i].size = 0;
+					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
+					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
+					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+				}
+			}
+			ap->jumbo = 0;
+			printk(KERN_INFO "%s: Jumbo ring flushed\n",
+			       dev->name);
+			if (!ap->tx_full)
+				netif_wake_queue(dev);
+			clear_bit(0, &ap->jumbo_refill_busy);
 			break;
+		}
 		default:
 			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
 			       dev->name, ap->evt_ring[evtcsm].evt);
@@ -1089,115 +1838,77 @@
 }
 
 
-static int ace_rx_int(struct device *dev, u32 rxretprd, u32 rxretcsm)
+static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
 {
-	struct ace_private *ap = (struct ace_private *)dev->priv;
-	struct ace_regs *regs = ap->regs;
-	u32 idx, oldidx;
+	struct ace_private *ap = dev->priv;
+	u32 idx;
+	int mini_count = 0, std_count = 0;
 
 	idx = rxretcsm;
 
-	while (idx != rxretprd){
-		struct sk_buff *skb, *newskb, *oldskb;
-		struct rx_desc *newrxdesc, *oldrxdesc;
-		u32 prdidx, size;
-		void *addr;
+	while (idx != rxretprd) {
+		struct ring_info *rip;
+		struct sk_buff *skb;
+		struct rx_desc *rxdesc, *retdesc;
+		u32 skbidx;
+		int desc_type, mapsize;
 		u16 csum;
-		int jumbo;
-
-		oldidx = ap->rx_return_ring[idx].idx;
-		jumbo = ap->rx_return_ring[idx].flags & DFLG_RX_JUMBO;
-
-		if (jumbo){
-			oldskb = ap->rx_jumbo_skbuff[oldidx];
-			prdidx = ap->rx_jumbo_skbprd;
-			newrxdesc = &ap->rx_jumbo_ring[prdidx];
-			oldrxdesc = &ap->rx_jumbo_ring[oldidx];
-		}else{
-			oldskb = ap->rx_std_skbuff[oldidx];
-			prdidx = ap->rx_std_skbprd;
-			newrxdesc = &ap->rx_std_ring[prdidx];
-			oldrxdesc = &ap->rx_std_ring[oldidx];
-		}
-
-		size = oldrxdesc->size;
-
-		if (size < PKT_COPY_THRESHOLD) {
-			skb = alloc_skb(size + 2, GFP_ATOMIC);
-			if (skb == NULL){
-				printk(KERN_ERR "%s: Out of memory\n",
-				       dev->name);
-				goto error;
-			}
-			/*
-			 * Make sure the real data is aligned
-			 */
 
-			skb_reserve(skb, 2);
-			memcpy(skb_put(skb, size), oldskb->data, size);
-			addr = get_aceaddr_bus(&oldrxdesc->addr);
-			newskb = oldskb;
-		}else{
-			skb = oldskb;
-
-			skb_put(skb, size);
-
-			newskb = alloc_skb(size + 2, GFP_ATOMIC);
-			if (newskb == NULL){
-				printk(KERN_ERR "%s: Out of memory\n",
-				       dev->name);
-				goto error;
-			}
+		retdesc = &ap->rx_return_ring[idx];
+		skbidx = retdesc->idx;
+		desc_type = retdesc->flags & (BD_FLG_JUMBO | BD_FLG_MINI);
 
+		switch(desc_type) {
 			/*
-			 * Make sure we DMA directly into nicely
-			 * aligned receive buffers
+			 * Normal frames do not have any flags set
+			 *
+			 * Mini and normal frames arrive frequently,
+			 * so use a local counter to avoid doing
+			 * atomic operations for each packet arriving.
 			 */
-			skb_reserve(newskb, 2);
-			addr = (void *)virt_to_bus(newskb->data);
+		case 0:
+			rip = &ap->skb->rx_std_skbuff[skbidx];
+			mapsize = ACE_STD_BUFSIZE - (2 + 16);
+			rxdesc = &ap->rx_std_ring[skbidx];
+			std_count++;
+			break;
+		case BD_FLG_JUMBO:
+			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
+			mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
+			rxdesc = &ap->rx_jumbo_ring[skbidx];
+			atomic_dec(&ap->cur_jumbo_bufs);
+			break;
+		case BD_FLG_MINI:
+			rip = &ap->skb->rx_mini_skbuff[skbidx];
+			mapsize = ACE_MINI_BUFSIZE - (2 + 16);
+			rxdesc = &ap->rx_mini_ring[skbidx];
+			mini_count++; 
+			break;
+		default:
+			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
+			       "returned by NIC\n", dev->name,
+			       retdesc->flags);
+			goto error;
 		}
 
-		set_aceaddr_bus(&newrxdesc->addr, addr);
-		newrxdesc->size = size;
-
-		newrxdesc->flags = oldrxdesc->flags;
-		newrxdesc->idx = prdidx;
-		newrxdesc->type = DESC_RX;
-#if (BITS_PER_LONG == 32)
-		newrxdesc->addr.addrhi = 0;
+		skb = rip->skb;
+		rip->skb = NULL;
+		pci_unmap_single(ap->pdev, rip->mapping, mapsize,
+				 PCI_DMA_FROMDEVICE);
+		skb_put(skb, retdesc->size);
+#if 0
+		/* unnecessary */
+		rxdesc->size = 0;
 #endif
 
-		oldrxdesc->size = 0;
-		set_aceaddr_bus(&oldrxdesc->addr, 0);
-
-		if (jumbo){
-			ap->rx_jumbo_skbuff[oldidx] = NULL;
-			ap->rx_jumbo_skbuff[prdidx] = newskb;
-
-			prdidx = (prdidx + 1) % RX_JUMBO_RING_ENTRIES;
-			ap->rx_jumbo_skbprd = prdidx;
-		}else{
-			ap->rx_std_skbuff[oldidx] = NULL;
-			ap->rx_std_skbuff[prdidx] = newskb;
-
-			prdidx = (prdidx + 1) % RX_STD_RING_ENTRIES;
-			ap->rx_std_skbprd = prdidx;
-		}
-
 		/*
 		 * Fly baby, fly!
 		 */
-		csum = ap->rx_return_ring[idx].tcp_udp_csum;
+		csum = retdesc->tcp_udp_csum;
 
 		skb->dev = dev;
 		skb->protocol = eth_type_trans(skb, dev);
 
-#if 0
-		/*
-		 * This was never actually enabled in the RX descriptors
-		 * anyway - it requires a bit more testing before enabling
-		 * it again.
-		 */
 		/*
 		 * If the checksum is correct and this is not a
 		 * fragment, tell the stack that the data is correct.
@@ -1208,34 +1919,31 @@
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb->ip_summed = CHECKSUM_NONE;
-#endif
+
 		netif_rx(skb);		/* send it up */
 
 		ap->stats.rx_packets++;
-		ap->stats.rx_bytes += skb->len;
-
-		if ((prdidx & 0x7) == 0){
-			struct cmd cmd;
-			if (jumbo)
-				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
-			else
-				cmd.evt = C_SET_RX_PRD_IDX;
-			cmd.code = 0;
-			cmd.idx = prdidx;
-			ace_issue_cmd(regs, &cmd);
-		}
+		ap->stats.rx_bytes += retdesc->size;
 
 		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
 	}
+
+	atomic_sub(std_count, &ap->cur_rx_bufs);
+	if (!ACE_IS_TIGON_I(ap))
+		atomic_sub(mini_count, &ap->cur_mini_bufs);
+
  out:
 	/*
 	 * According to the documentation RxRetCsm is obsolete with
-	 * the 12.3.x Firmware - my Tigon I NIC's seem to disagree!
+	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
 	 */
-	writel(idx, &regs->RxRetCsm);
+	if (ACE_IS_TIGON_I(ap)) {
+		struct ace_regs *regs = ap->regs;
+		writel(idx, &regs->RxRetCsm);
+	}
 	ap->cur_rx = idx;
 
-	return idx;
+	return;
  error:
 	idx = rxretprd;
 	goto out;
@@ -1246,24 +1954,21 @@
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
-	struct device *dev = (struct device *)dev_id;
+	struct net_device *dev = (struct net_device *)dev_id;
+	u32 idx;
 	u32 txcsm, rxretcsm, rxretprd;
 	u32 evtcsm, evtprd;
 
-	ap = (struct ace_private *)dev->priv;
+	ap = dev->priv;
 	regs = ap->regs;
 
-	spin_lock(&ap->lock);
-
 	/*
 	 * In case of PCI shared interrupts or spurious interrupts,
 	 * we want to make sure it is actually our interrupt before
 	 * spending any time in here.
 	 */
-	if (!(readl(&regs->HostCtrl) & IN_INT)){
-		spin_unlock(&ap->lock);
+	if (!(readl(&regs->HostCtrl) & IN_INT))
 		return;
-	}
 
 	/*
 	 * Tell the card not to generate interrupts while we are in here.
@@ -1271,25 +1976,46 @@
 	writel(1, &regs->Mb0Lo);
 
 	/*
-	 * Service RX ints before TX
+	 * There is no conflict between transmit handling in
+	 * start_xmit and receive processing, thus there is no reason
+	 * to take a spin lock for RX handling. Wait until we start
+	 * working on the other stuff - hey we don't need a spin lock
+	 * anymore.
 	 */
-	rxretprd = ap->rx_ret_prd;
+	rxretprd = *ap->rx_ret_prd;
 	rxretcsm = ap->cur_rx;
 
 	if (rxretprd != rxretcsm)
-		rxretprd = ace_rx_int(dev, rxretprd, rxretcsm);
+		ace_rx_int(dev, rxretprd, rxretcsm);
 
-	txcsm = ap->tx_csm;
-	if (txcsm != ap->tx_ret_csm) {
-		u32 idx = ap->tx_ret_csm;
+	txcsm = *ap->tx_csm;
+	idx = ap->tx_ret_csm;
 
+	if (txcsm != idx) {
 		do {
-			ap->stats.tx_packets++;
-			ap->stats.tx_bytes += ap->tx_skbuff[idx]->len;
-			dev_kfree_skb(ap->tx_skbuff[idx]);
+			struct sk_buff *skb;
+
+			skb = ap->skb->tx_skbuff[idx].skb;
+			if (skb) {
+				dma_addr_t mapping;
 
-			ap->tx_skbuff[idx] = NULL;
+				mapping = ap->skb->tx_skbuff[idx].mapping;
 
+				ap->stats.tx_packets++;
+				ap->stats.tx_bytes += skb->len;
+				pci_unmap_single(ap->pdev, mapping, skb->len,
+						 PCI_DMA_TODEVICE);
+				dev_kfree_skb_irq(skb);
+
+				ap->skb->tx_skbuff[idx].skb = NULL;
+			}
+
+			/*
+			 * The question here is whether these writes could
+			 * simply be skipped - I have never seen any errors
+			 * caused by the NIC actually trying to access
+			 * these incorrectly.
+			 */
 #if (BITS_PER_LONG == 64)
 			writel(0, &ap->tx_ring[idx].addr.addrhi);
 #endif
@@ -1299,11 +2025,19 @@
 			idx = (idx + 1) % TX_RING_ENTRIES;
 		} while (idx != txcsm);
 
-		if (ap->tx_full && dev->tbusy &&
-		    (((ap->tx_prd + 1) % TX_RING_ENTRIES) != txcsm)){
-			ap->tx_full = 0;
-			dev->tbusy = 0;
-			mark_bh(NET_BH);
+		/*
+		 * Once we actually get to this point the tx ring has
+		 * already been trimmed thus it cannot be full!
+		 * Ie. skip the comparison of the tx producer vs. the
+		 * consumer.
+		 */
+		if (netif_queue_stopped(dev) && xchg(&ap->tx_full, 0)) {
+			/*
+			 * This does not need to be atomic (and expensive),
+			 * but I've seen cases where it would fail otherwise ;-(
+			 */
+			netif_wake_queue(dev);
+			ace_mark_net_bh(NET_BH);
 
 			/*
 			 * TX ring is no longer full, aka the
@@ -1313,23 +2047,84 @@
 		}
 
 		ap->tx_ret_csm = txcsm;
+		wmb();
 	}
 
 	evtcsm = readl(&regs->EvtCsm);
-	evtprd = ap->evt_prd;
+	evtprd = *ap->evt_prd;
 
-	if (evtcsm != evtprd){
+	if (evtcsm != evtprd) {
 		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
+		writel(evtcsm, &regs->EvtCsm);
 	}
 
-	writel(evtcsm, &regs->EvtCsm);
-	writel(0, &regs->Mb0Lo);
+	/*
+	 * This has to go last in the interrupt handler and run with
+	 * the spin lock released ... what lock?
+	 */
+	if (netif_running(dev)) {
+		int cur_size;
+		int run_bh = 0;
+
+		cur_size = atomic_read(&ap->cur_rx_bufs);
+		if (cur_size < RX_LOW_STD_THRES) {
+			if ((cur_size < RX_PANIC_STD_THRES) &&
+			    !test_and_set_bit(0, &ap->std_refill_busy)) {
+#if DEBUG
+				printk("low on std buffers %i\n", cur_size);
+#endif
+				ace_load_std_rx_ring(ap,
+						     RX_RING_SIZE - cur_size);
+			} else
+				run_bh = 1;
+		}
+
+		if (!ACE_IS_TIGON_I(ap)) {
+			cur_size = atomic_read(&ap->cur_mini_bufs);
+			if (cur_size < RX_LOW_MINI_THRES) {
+				if ((cur_size < RX_PANIC_MINI_THRES) &&
+				    !test_and_set_bit(0,
+						      &ap->mini_refill_busy)) {
+#if DEBUG
+					printk("low on mini buffers %i\n",
+					       cur_size);
+#endif
+					ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+				} else
+					run_bh = 1;
+			}
+		}
+
+		if (ap->jumbo) {
+			cur_size = atomic_read(&ap->cur_jumbo_bufs);
+			if (cur_size < RX_LOW_JUMBO_THRES) {
+				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
+				    !test_and_set_bit(0,
+						      &ap->jumbo_refill_busy)){
+#if DEBUG
+					printk("low on jumbo buffers %i\n",
+					       cur_size);
+#endif
+					ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+				} else
+					run_bh = 1;
+			}
+		}
+		if (run_bh && !ap->bh_pending) {
+			ap->bh_pending = 1;
+			queue_task(&ap->immediate, &tq_immediate);
+			mark_bh(IMMEDIATE_BH);
+		}
+	}
 
-	spin_unlock(&ap->lock);
+	/*
+	 * Allow the card to generate interrupts again
+	 */
+	writel(0, &regs->Mb0Lo);
 }
 
 
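The tail of the interrupt handler above implements a two-watermark refill policy: below the low watermark the refill is handed to the immediate bottom half, and only below the panic watermark is the ring refilled straight from interrupt context, guarded by a test_and_set_bit() busy flag. A stripped-down sketch of that policy for the standard ring only (the helper is hypothetical; the driver keeps this inline):

/* hypothetical helper - illustration of the watermark policy only */
static void ace_check_std_refill(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	int cur_size = atomic_read(&ap->cur_rx_bufs);

	if (cur_size >= RX_LOW_STD_THRES)
		return;				/* enough buffers left */

	if ((cur_size < RX_PANIC_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
		/* critically low - refill straight from irq context */
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
	} else if (!ap->bh_pending) {
		/* merely low, or a refill is in flight - defer to the bh */
		ap->bh_pending = 1;
		queue_task(&ap->immediate, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}
}
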
-static int ace_open(struct device *dev)
+static int ace_open(struct net_device *dev)
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
@@ -1338,22 +2133,33 @@
 	ap = dev->priv;
 	regs = ap->regs;
 
-	if (!(ap->fw_running)){
-		printk(KERN_WARNING "%s: firmware not running!\n", dev->name);
+	if (!(ap->fw_running)) {
+		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
 		return -EBUSY;
 	}
 
 	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
 
+	/*
+	 * Zero the stats when restarting the interface...
+	 */
+	memset(&ap->stats, 0, sizeof(ap->stats));
+
+	cmd.evt = C_CLEAR_STATS;
+	cmd.code = 0;
+	cmd.idx = 0;
+	ace_issue_cmd(regs, &cmd);
+
 	cmd.evt = C_HOST_STATE;
 	cmd.code = C_C_STACK_UP;
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 
-	if (ap->jumbo)
-		ace_load_jumbo_rx_ring(dev);
+	if (ap->jumbo &&
+	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
+		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
 
-	if (dev->flags & IFF_PROMISC){
+	if (dev->flags & IFF_PROMISC) {
 		cmd.evt = C_SET_PROMISC_MODE;
 		cmd.code = C_C_PROMISC_ENABLE;
 		cmd.idx = 0;
@@ -1365,19 +2171,13 @@
 	ap->mcast_all = 0;
 
 #if 0
-	{ long myjif = jiffies + HZ;
-	while (time_before(jiffies, myjif));
-	}
-
 	cmd.evt = C_LNK_NEGOTIATION;
 	cmd.code = 0;
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 #endif
 
-	dev->tbusy = 0;
-	dev->interrupt = 0;
-	dev->start = 1;
+	netif_start_queue(dev);
 
 	MOD_INC_USE_COUNT;
 
@@ -1387,11 +2187,20 @@
 	init_timer(&ap->timer);
 	ap->timer.data = (unsigned long)dev;
 	ap->timer.function = ace_timer;
+
+	/*
+	 * Setup the bottom half rx ring refill handler
+	 */
+	ap->immediate.next = NULL;
+	ap->immediate.sync = 0;
+	ap->immediate.routine = (void *)(void *)ace_bh;
+	ap->immediate.data = dev;
+
 	return 0;
 }
 
 
-static int ace_close(struct device *dev)
+static int ace_close(struct net_device *dev)
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
@@ -1399,15 +2208,15 @@
 	unsigned long flags;
 	short i;
 
-	dev->start = 0;
-	set_bit(0, (void*)&dev->tbusy);
+	ace_if_down(dev);
+	netif_stop_queue(dev);
 
-	ap = (struct ace_private *)dev->priv;
+	ap = dev->priv;
 	regs = ap->regs;
 
 	del_timer(&ap->timer);
 
-	if (ap->promisc){
+	if (ap->promisc) {
 		cmd.evt = C_SET_PROMISC_MODE;
 		cmd.code = C_C_PROMISC_DISABLE;
 		cmd.idx = 0;
@@ -1420,56 +2229,95 @@
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 
-	spin_lock_irqsave(&ap->lock, flags);
+	/*
+	 * Make sure one CPU is not processing packets while
+	 * buffers are being released by another.
+	 */
+	save_flags(flags);
+	cli();
 
 	for (i = 0; i < TX_RING_ENTRIES; i++) {
-		if (ap->tx_skbuff[i]) {
+		struct sk_buff *skb;
+		dma_addr_t mapping;
+
+		skb = ap->skb->tx_skbuff[i].skb;
+		mapping = ap->skb->tx_skbuff[i].mapping;
+		if (skb) {
 			writel(0, &ap->tx_ring[i].addr.addrhi);
 			writel(0, &ap->tx_ring[i].addr.addrlo);
 			writel(0, &ap->tx_ring[i].flagsize);
-			dev_kfree_skb(ap->tx_skbuff[i]);
+			pci_unmap_single(ap->pdev, mapping, skb->len,
+					 PCI_DMA_TODEVICE);
+			dev_kfree_skb(skb);
+			ap->skb->tx_skbuff[i].skb = NULL;
 		}
 	}
 
-	if (ap->jumbo)
-		ace_flush_jumbo_rx_ring(dev);
+	if (ap->jumbo) {
+		cmd.evt = C_RESET_JUMBO_RNG;
+		cmd.code = 0;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+	}
 
-	spin_unlock_irqrestore(&ap->lock, flags);
+	restore_flags(flags);
 
 	MOD_DEC_USE_COUNT;
 	return 0;
 }
 
 
-static int ace_start_xmit(struct sk_buff *skb, struct device *dev)
+static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ace_private *ap = (struct ace_private *)dev->priv;
+	struct ace_private *ap = dev->priv;
 	struct ace_regs *regs = ap->regs;
-	unsigned long flags;
 	unsigned long addr;
 	u32 idx, flagsize;
 
-	spin_lock_irqsave(&ap->lock, flags);
+	/*
+	 * ARGH, there is just no pretty way to do this
+	 */
+#if (LINUX_VERSION_CODE < 0x02032b)
+	if (test_and_set_bit(0, &dev->tbusy))
+		return 1;
+#else
+	netif_stop_queue(dev);
+#endif
 
 	idx = ap->tx_prd;
 
-	ap->tx_skbuff[idx] = skb;
-	addr = virt_to_bus(skb->data);
+	if ((idx + 1) % TX_RING_ENTRIES == ap->tx_ret_csm) {
+		ap->tx_full = 1;
+#if DEBUG
+		printk("%s: trying to transmit while the tx ring is full "
+		       "- this should not happen!\n", dev->name);
+#endif
+		return 1;
+	}
+
+	ap->skb->tx_skbuff[idx].skb = skb;
+	ap->skb->tx_skbuff[idx].mapping =
+		pci_map_single(ap->pdev, skb->data, skb->len,
+			       PCI_DMA_TODEVICE);
+	addr = (unsigned long) ap->skb->tx_skbuff[idx].mapping;
 #if (BITS_PER_LONG == 64)
 	writel(addr >> 32, &ap->tx_ring[idx].addr.addrhi);
 #endif
 	writel(addr & 0xffffffff, &ap->tx_ring[idx].addr.addrlo);
-	flagsize = (skb->len << 16) | (DESC_END) ;
+	flagsize = (skb->len << 16) | (BD_FLG_END) ;
 	writel(flagsize, &ap->tx_ring[idx].flagsize);
-	mb();
+	wmb();
 	idx = (idx + 1) % TX_RING_ENTRIES;
 
 	ap->tx_prd = idx;
-	writel(idx, &regs->TxPrd);
+	ace_set_txprd(regs, ap, idx);
 
-	if ((idx + 1) % TX_RING_ENTRIES == ap->tx_ret_csm){
+	/*
+	 * tx_csm is set by the NIC whereas we set tx_ret_csm which
+	 * is always trying to catch tx_csm
+	 */
+	if ((idx + 2) % TX_RING_ENTRIES == ap->tx_ret_csm) {
 		ap->tx_full = 1;
-		set_bit(0, (void*)&dev->tbusy);
 		/*
 		 * Queue is full, add timer to detect whether the
 		 * transmitter is stuck. Use mod_timer as we can get
@@ -1477,16 +2325,33 @@
 		 * timers.
 		 */
 		mod_timer(&ap->timer, jiffies + (3 * HZ));
-	}
 
-	spin_unlock_irqrestore(&ap->lock, flags);
+		/* The following check will fix a race between the interrupt
+		 * handler increasing the tx_ret_csm and testing for tx_full
+		 * and this tx routine's testing the tx_ret_csm and setting
+		 * the tx_full; note that this fix makes assumptions on the
+		 * ordering of writes (sequential consistency will fly; TSO
+		 * processor order would work too) but that's what lock-less
+		 * programming is all about
+		 */
+		if (((idx + 2) % TX_RING_ENTRIES != ap->tx_ret_csm)
+			&& xchg(&ap->tx_full, 0)) {
+			del_timer(&ap->timer);
+			netif_wake_queue(dev);
+		}
+	} else {
+		/*
+		 * No need for it to be atomic - yet it seems it needs to be
+		 */
+		netif_wake_queue(dev);
+	}
 
 	dev->trans_start = jiffies;
 	return 0;
 }
 
 
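With the new PCI DMA mapping infrastructure, every skb handed to the NIC in ace_start_xmit() carries a pci_map_single() mapping that the transmit-completion path in the interrupt handler must undo before freeing the skb. A minimal sketch of that pairing, using the ring_info bookkeeping visible above (both helper names are hypothetical; the driver open-codes this):

/* hypothetical helpers - illustration of the map/unmap pairing only */
static void ace_map_tx_skb(struct ace_private *ap, int idx,
			   struct sk_buff *skb)
{
	ap->skb->tx_skbuff[idx].skb = skb;
	ap->skb->tx_skbuff[idx].mapping =
		pci_map_single(ap->pdev, skb->data, skb->len,
			       PCI_DMA_TODEVICE);
}

static void ace_unmap_tx_skb(struct ace_private *ap, int idx)
{
	struct sk_buff *skb = ap->skb->tx_skbuff[idx].skb;

	if (!skb)
		return;
	pci_unmap_single(ap->pdev, ap->skb->tx_skbuff[idx].mapping,
			 skb->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);		/* safe in interrupt context */
	ap->skb->tx_skbuff[idx].skb = NULL;
}
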
-static int ace_change_mtu(struct device *dev, int new_mtu)
+static int ace_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ace_private *ap = dev->priv;
 	struct ace_regs *regs = ap->regs;
@@ -1497,40 +2362,182 @@
 	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
 	dev->mtu = new_mtu;
 
-	if (new_mtu > ACE_STD_MTU){
-		if (!(ap->jumbo)){
+	if (new_mtu > ACE_STD_MTU) {
+		if (!(ap->jumbo)) {
 			printk(KERN_INFO "%s: Enabling Jumbo frame "
 			       "support\n", dev->name);
 			ap->jumbo = 1;
-			ace_load_jumbo_rx_ring(dev);
-		}
-		ap->jumbo = 1;
-	}else{
+			if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
+				ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+			ace_set_rxtx_parms(dev, 1);
+		}
+	} else {
+		netif_stop_queue(dev);
+		while (test_and_set_bit(0, &ap->jumbo_refill_busy));
+		synchronize_irq();
+		ace_set_rxtx_parms(dev, 0);
 		if (ap->jumbo){
-			ace_flush_jumbo_rx_ring(dev);
+			struct cmd cmd;
 
-			printk(KERN_INFO "%s: Disabling Jumbo frame support\n",
-			       dev->name);
+			cmd.evt = C_RESET_JUMBO_RNG;
+			cmd.code = 0;
+			cmd.idx = 0;
+			ace_issue_cmd(regs, &cmd);
 		}
-		ap->jumbo = 0;
 	}
 
 	return 0;
 }
 
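Note how shrinking the MTU does not free the jumbo buffers directly: ace_change_mtu() only quiesces the interface and issues C_RESET_JUMBO_RNG, and the actual flush happens asynchronously when the firmware posts E_RESET_JUMBO_RNG back to ace_handle_event() earlier in this patch. A condensed, commented sketch of that hand-off, assuming jumbo frames were enabled (the helper is hypothetical):

/* hypothetical helper - condensed from ace_change_mtu() above */
static void ace_disable_jumbo(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;
	struct cmd cmd;

	netif_stop_queue(dev);			/* no new transmits */
	while (test_and_set_bit(0, &ap->jumbo_refill_busy))
		;				/* wait out a running refill */
	synchronize_irq();			/* and any in-flight interrupt */
	ace_set_rxtx_parms(dev, 0);

	/*
	 * The jumbo skbs are not freed here; the firmware answers this
	 * command with an E_RESET_JUMBO_RNG event, and ace_handle_event()
	 * flushes the ring, clears ap->jumbo and wakes the queue.
	 */
	cmd.evt = C_RESET_JUMBO_RNG;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
}
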
 
+static int ace_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+#ifdef ETHTOOL
+	struct ace_private *ap = dev->priv;
+	struct ace_regs *regs = ap->regs;
+	struct ethtool_cmd ecmd;
+	u32 link, speed;
+
+#ifdef SPIN_DEBUG
+	if (cmd == (SIOCDEVPRIVATE+0x0e)) {
+		printk(KERN_NOTICE "%s: dumping debug info\n", dev->name);
+		printk(KERN_NOTICE "%s: tbusy %li, tx_ret_csm %i, "
+		       "tx_prd %i\n", dev->name, dev->tbusy,
+		       ap->tx_ret_csm, ap->tx_prd);
+		printk(KERN_NOTICE "%s: cur_rx %i, std_refill %li, "
+		       "mini_rx %i, mini_refill %li\n", dev->name,
+		       atomic_read(&ap->cur_rx_bufs), ap->std_refill_busy,
+		       atomic_read(&ap->cur_mini_bufs), ap->mini_refill_busy);
+		printk(KERN_NOTICE "%s: CpuCtrl %08x\n",
+		       dev->name, readl(&regs->CpuCtrl));
+		return 0;
+	}
+#endif
+	if (cmd != SIOCETHTOOL)
+		return -EOPNOTSUPP;
+	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof(ecmd)))
+		return -EFAULT;
+
+	if (ecmd.cmd == ETH_GSET) {
+		ecmd.supported =
+			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
+			 SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+
+		ecmd.port = PORT_FIBRE;
+		ecmd.transceiver = XCVR_INTERNAL;
+		ecmd.phy_address = 0;
+
+		link = readl(&regs->GigLnkState);
+		if (link & LNK_1000MB)
+			ecmd.speed = SPEED_1000;
+		else {
+			link = readl(&regs->FastLnkState);
+			if (link & LNK_100MB)
+				ecmd.speed = SPEED_100;
+			else if (link & LNK_10MB)
+				ecmd.speed = SPEED_10;
+			else
+				ecmd.speed = 0;
+		}
+		if (link & LNK_FULL_DUPLEX)
+			ecmd.duplex = DUPLEX_FULL;
+		else
+			ecmd.duplex = DUPLEX_HALF;
+
+		if (link & LNK_NEGOTIATE)
+			ecmd.autoneg = AUTONEG_ENABLE;
+		else
+			ecmd.autoneg = AUTONEG_DISABLE;
+
+		ecmd.trace = readl(&regs->TuneTrace);
+
+		ecmd.txcoal = readl(&regs->TuneTxCoalTicks);
+		ecmd.rxcoal = readl(&regs->TuneRxCoalTicks);
+		ecmd.maxtxpkt = readl(&regs->TuneMaxTxDesc);
+		ecmd.maxrxpkt = readl(&regs->TuneMaxRxDesc);
+
+		if(copy_to_user(ifr->ifr_data, &ecmd, sizeof(ecmd)))
+			return -EFAULT;
+		return 0;
+	} else if (ecmd.cmd == ETH_SSET) {
+		if(!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		link = readl(&regs->GigLnkState);
+		if (link & LNK_1000MB)
+			speed = SPEED_1000;
+		else {
+			link = readl(&regs->FastLnkState);
+			if (link & LNK_100MB)
+				speed = SPEED_100;
+			else if (link & LNK_10MB)
+				speed = SPEED_10;
+			else
+				speed = SPEED_100;
+		}
+
+		link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
+			LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
+		if (!ACE_IS_TIGON_I(ap))
+			link |= LNK_TX_FLOW_CTL_Y;
+		if (ecmd.autoneg == AUTONEG_ENABLE)
+			link |= LNK_NEGOTIATE;
+		if (ecmd.speed != speed) {
+			link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
+			switch (speed) {
+			case SPEED_1000:
+				link |= LNK_1000MB;
+				break;
+			case SPEED_100:
+				link |= LNK_100MB;
+				break;
+			case SPEED_10:
+				link |= LNK_10MB;
+				break;
+			}
+		}
+		if (ecmd.duplex == DUPLEX_FULL)
+			link |= LNK_FULL_DUPLEX;
+
+		if (link != ap->link) {
+			struct cmd cmd;
+			printk(KERN_INFO "%s: Renegotiating link state\n",
+			       dev->name);
+
+			ap->link = link;
+			writel(link, &regs->TuneLink);
+			if (!ACE_IS_TIGON_I(ap))
+				writel(link, &regs->TuneFastLink);
+			wmb();
+
+			cmd.evt = C_LNK_NEGOTIATION;
+			cmd.code = 0;
+			cmd.idx = 0;
+			ace_issue_cmd(regs, &cmd);
+		}
+		return 0;
+	}
+#endif
+
+	return -EOPNOTSUPP;
+}
+
+
 /*
  * Set the hardware MAC address.
  */
-static int ace_set_mac_addr(struct device *dev, void *p)
+static int ace_set_mac_addr(struct net_device *dev, void *p)
 {
 	struct sockaddr *addr=p;
 	struct ace_regs *regs;
 	u16 *da;
 	struct cmd cmd;
 
-	if(dev->start)
+	if(netif_running(dev))
 		return -EBUSY;
+
 	memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
 
 	da = (u16 *)dev->dev_addr;
@@ -1548,7 +2555,7 @@
 }
 
 
-static void ace_set_multicast_list(struct device *dev)
+static void ace_set_multicast_list(struct net_device *dev)
 {
 	struct ace_private *ap = dev->priv;
 	struct ace_regs *regs = ap->regs;
@@ -1560,9 +2567,9 @@
 		cmd.idx = 0;
 		ace_issue_cmd(regs, &cmd);
 		ap->mcast_all = 1;
-	} else if (ap->mcast_all){
+	} else if (ap->mcast_all) {
 		cmd.evt = C_SET_MULTICAST_MODE;
-		cmd.code = C_C_MCAST_ENABLE;
+		cmd.code = C_C_MCAST_DISABLE;
 		cmd.idx = 0;
 		ace_issue_cmd(regs, &cmd);
 		ap->mcast_all = 0;
@@ -1574,7 +2581,7 @@
 		cmd.idx = 0;
 		ace_issue_cmd(regs, &cmd);
 		ap->promisc = 1;
-	}else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)){
+	}else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
 		cmd.evt = C_SET_PROMISC_MODE;
 		cmd.code = C_C_PROMISC_DISABLE;
 		cmd.idx = 0;
@@ -1602,15 +2609,21 @@
 }
 
 
-static struct net_device_stats *ace_get_stats(struct device *dev)
+static struct net_device_stats *ace_get_stats(struct net_device *dev)
 {
 	struct ace_private *ap = dev->priv;
+	struct ace_mac_stats *mac_stats =
+		(struct ace_mac_stats *)ap->regs->Stats;
+
+	ap->stats.rx_missed_errors = readl(&mac_stats->drop_space);
+	ap->stats.multicast = readl(&mac_stats->kept_mc);
+	ap->stats.collisions = readl(&mac_stats->coll);
 
 	return(&ap->stats);
 }
 
 
-__initfunc(void ace_copy(struct ace_regs *regs, void *src, u32 dest, int size))
+void __init ace_copy(struct ace_regs *regs, void *src, u32 dest, int size)
 {
 	unsigned long tdest;
 	u32 *wsrc;
@@ -1619,23 +2632,20 @@
 	if (size <= 0)
 		return;
 
-	while (size > 0){
+	while (size > 0) {
 		tsize = min(((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
 			    min(size, ACE_WINDOW_SIZE));
 		tdest = (unsigned long)&regs->Window +
 			(dest & (ACE_WINDOW_SIZE - 1));
 		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
-#ifdef __BIG_ENDIAN
-#error "data must be swapped here"
-#else
-/*
- * XXX - special memcpy needed here!!!
- */
+		/*
+		 * This requires byte swapping on big endian, however
+		 * writel does that for us
+		 */
 		wsrc = src;
-		for (i = 0; i < (tsize / 4); i++){
+		for (i = 0; i < (tsize / 4); i++) {
 			writel(wsrc[i], tdest + i*4);
 		}
-#endif
 		dest += tsize;
 		src += tsize;
 		size -= tsize;
@@ -1645,7 +2655,7 @@
 }
 
 
-__initfunc(void ace_clear(struct ace_regs *regs, u32 dest, int size))
+void __init ace_clear(struct ace_regs *regs, u32 dest, int size)
 {
 	unsigned long tdest;
 	short tsize = 0, i;
@@ -1653,14 +2663,14 @@
 	if (size <= 0)
 		return;
 
-	while (size > 0){
+	while (size > 0) {
 		tsize = min(((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
 			    min(size, ACE_WINDOW_SIZE));
 		tdest = (unsigned long)&regs->Window +
 			(dest & (ACE_WINDOW_SIZE - 1));
 		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
 
-		for (i = 0; i < (tsize / 4); i++){
+		for (i = 0; i < (tsize / 4); i++) {
 			writel(0, tdest + i*4);
 		}
 
@@ -1678,15 +2688,15 @@
  * This operation requires the NIC to be halted and is performed with
  * interrupts disabled and with the spinlock hold.
  */
-__initfunc(int ace_load_firmware(struct device *dev))
+int __init ace_load_firmware(struct net_device *dev)
 {
 	struct ace_private *ap;
 	struct ace_regs *regs;
 
-	ap = (struct ace_private *)dev->priv;
+	ap = dev->priv;
 	regs = ap->regs;
 
-	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)){
+	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
 		printk(KERN_ERR "%s: trying to download firmware while the "
 		       "CPU is running!\n", dev->name);
 		return -EFAULT;
@@ -1697,14 +2707,14 @@
 	 * funny things on NICs with only 512KB SRAM
 	 */
 	ace_clear(regs, 0x2000, 0x80000-0x2000);
-	if (ap->version == 1){
+	if (ACE_IS_TIGON_I(ap)) {
 		ace_copy(regs, tigonFwText, tigonFwTextAddr, tigonFwTextLen);
 		ace_copy(regs, tigonFwData, tigonFwDataAddr, tigonFwDataLen);
 		ace_copy(regs, tigonFwRodata, tigonFwRodataAddr,
 			 tigonFwRodataLen);
 		ace_clear(regs, tigonFwBssAddr, tigonFwBssLen);
 		ace_clear(regs, tigonFwSbssAddr, tigonFwSbssLen);
-	}else if (ap->version == 2){
+	}else if (ap->version == 2) {
 		ace_clear(regs, tigon2FwBssAddr, tigon2FwBssLen);
 		ace_clear(regs, tigon2FwSbssAddr, tigon2FwSbssLen);
 		ace_copy(regs, tigon2FwText, tigon2FwTextAddr,tigon2FwTextLen);
@@ -1728,36 +2738,40 @@
  * specs.
  *
  * Oh yes, this is only the beginning!
+ *
+ * Thanks to Stevarino Webinski for helping track down the bugs in the
+ * i2c readout code by beta testing all my hacks.
  */
-static void eeprom_start(struct ace_regs *regs)
+static void __init eeprom_start(struct ace_regs *regs)
 {
-	u32 local = readl(&regs->LocalCtrl);
+	u32 local;
 
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
+	local = readl(&regs->LocalCtrl);
 	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local |= EEPROM_CLK_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local &= ~EEPROM_DATA_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local &= ~EEPROM_CLK_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
 }
 
 
-static void eeprom_prep(struct ace_regs *regs, u8 magic)
+static void __init eeprom_prep(struct ace_regs *regs, u8 magic)
 {
 	short i;
 	u32 local;
 
-	udelay(2);
+	udelay(ACE_SHORT_DELAY);
 	local = readl(&regs->LocalCtrl);
 	local &= ~EEPROM_DATA_OUT;
 	local |= EEPROM_WRITE_ENABLE;
@@ -1765,7 +2779,7 @@
 	mb();
 
 	for (i = 0; i < 8; i++, magic <<= 1) {
-		udelay(2);
+		udelay(ACE_SHORT_DELAY);
 		if (magic & 0x80) 
 			local |= EEPROM_DATA_OUT;
 		else
@@ -1773,11 +2787,11 @@
 		writel(local, &regs->LocalCtrl);
 		mb();
 
-		udelay(1);
+		udelay(ACE_SHORT_DELAY);
 		local |= EEPROM_CLK_OUT;
 		writel(local, &regs->LocalCtrl);
 		mb();
-		udelay(1);
+		udelay(ACE_SHORT_DELAY);
 		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
 		writel(local, &regs->LocalCtrl);
 		mb();
@@ -1785,7 +2799,7 @@
 }
 
 
-static int eeprom_check_ack(struct ace_regs *regs)
+static int __init eeprom_check_ack(struct ace_regs *regs)
 {
 	int state;
 	u32 local;
@@ -1794,14 +2808,14 @@
 	local &= ~EEPROM_WRITE_ENABLE;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(2);
+	udelay(ACE_LONG_DELAY);
 	local |= EEPROM_CLK_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	/* sample data in middle of high clk */
 	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	mb();
 	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
 	mb();
@@ -1810,27 +2824,28 @@
 }
 
 
-static void eeprom_stop(struct ace_regs *regs)
+static void __init eeprom_stop(struct ace_regs *regs)
 {
 	u32 local;
 
+	udelay(ACE_SHORT_DELAY);
 	local = readl(&regs->LocalCtrl);
 	local |= EEPROM_WRITE_ENABLE;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local &= ~EEPROM_DATA_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local |= EEPROM_CLK_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(1);
+	udelay(ACE_SHORT_DELAY);
 	local |= EEPROM_DATA_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
-	udelay(2);
+	udelay(ACE_LONG_DELAY);
 	local &= ~EEPROM_CLK_OUT;
 	writel(local, &regs->LocalCtrl);
 	mb();
@@ -1840,78 +2855,120 @@
 /*
  * Read a whole byte from the EEPROM.
  */
-static u8 read_eeprom_byte(struct ace_regs *regs, unsigned long offset)
+static int __init read_eeprom_byte(struct net_device *dev,
+				   unsigned long offset)
 {
+	struct ace_regs *regs;
+	unsigned long flags;
 	u32 local;
+	int result = 0;
 	short i;
-	u8 result = 0;
 
-	if (!regs){
-		printk(KERN_ERR "No regs!\n");
-		return 0;
+	if (!dev) {
+		printk(KERN_ERR "No device!\n");
+		result = -ENODEV;
+		goto eeprom_read_error;
 	}
 
+	regs = ((struct ace_private *)dev->priv)->regs;
+
+	/*
+	 * Don't take interrupts on this CPU while bit banging
+	 * the %#%#@$ I2C device
+	 */
+	__save_flags(flags);
+	__cli();
+
 	eeprom_start(regs);
 
 	eeprom_prep(regs, EEPROM_WRITE_SELECT);
-	if (eeprom_check_ack(regs)){
-		printk("Unable to sync eeprom\n");
-		return 0;
+	if (eeprom_check_ack(regs)) {
+		__restore_flags(flags);
+		printk(KERN_ERR "%s: Unable to sync eeprom\n", dev->name);
+		result = -EIO;
+		goto eeprom_read_error;
 	}
 
 	eeprom_prep(regs, (offset >> 8) & 0xff);
-	if (eeprom_check_ack(regs))
-		return 0;
+	if (eeprom_check_ack(regs)) {
+		__restore_flags(flags);
+		printk(KERN_ERR "%s: Unable to set address byte 0\n",
+		       dev->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
 
 	eeprom_prep(regs, offset & 0xff);
-	if (eeprom_check_ack(regs))
-		return 0;
+	if (eeprom_check_ack(regs)) {
+		__restore_flags(flags);
+		printk(KERN_ERR "%s: Unable to set address byte 1\n",
+		       dev->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
 
 	eeprom_start(regs);
 	eeprom_prep(regs, EEPROM_READ_SELECT);
-	if (eeprom_check_ack(regs))
-		return 0;
+	if (eeprom_check_ack(regs)) {
+		__restore_flags(flags);
+		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
+		       dev->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
 
 	for (i = 0; i < 8; i++) {
 		local = readl(&regs->LocalCtrl);
 		local &= ~EEPROM_WRITE_ENABLE;
 		writel(local, &regs->LocalCtrl);
-		udelay(2);
+		udelay(ACE_LONG_DELAY);
 		mb();
 		local |= EEPROM_CLK_OUT;
 		writel(local, &regs->LocalCtrl);
-		udelay(1);
 		mb();
+		udelay(ACE_SHORT_DELAY);
 		/* sample data mid high clk */
 		result = (result << 1) |
 			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
-		udelay(1);
+		udelay(ACE_SHORT_DELAY);
 		mb();
 		local = readl(&regs->LocalCtrl);
 		local &= ~EEPROM_CLK_OUT;
 		writel(local, &regs->LocalCtrl);
+		udelay(ACE_SHORT_DELAY);
 		mb();
-		if (i == 7){
+		if (i == 7) {
 			local |= EEPROM_WRITE_ENABLE;
 			writel(local, &regs->LocalCtrl);
 			mb();
+			udelay(ACE_SHORT_DELAY);
 		}
 	}
 
 	local |= EEPROM_DATA_OUT;
 	writel(local, &regs->LocalCtrl);
-	udelay(1);
+	mb();
+	udelay(ACE_SHORT_DELAY);
 	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
-	udelay(2);
+	udelay(ACE_LONG_DELAY);
 	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
 	eeprom_stop(regs);
 
+	__restore_flags(flags);
+ out:
 	return result;
+
+ eeprom_read_error:
+	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
+	       dev->name, offset);
+	goto out;
 }
 
 
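The byte-read loop in read_eeprom_byte() above is eight repetitions of the same clock cycle: release the data pin, raise the clock, sample EEPROM_DATA_IN while the clock is high, then drop the clock again. One such cycle, factored into a hypothetical helper for readability (the driver keeps it inline):

/* hypothetical helper - one bit of the bit-banged EEPROM read */
static int eeprom_read_bit(struct ace_regs *regs)
{
	u32 local = readl(&regs->LocalCtrl);
	int bit;

	local &= ~EEPROM_WRITE_ENABLE;		/* let the EEPROM drive data */
	writel(local, &regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	mb();

	local |= EEPROM_CLK_OUT;		/* clock high ... */
	writel(local, &regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);

	/* ... and sample the data line in the middle of the high clock */
	bit = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();

	local &= ~EEPROM_CLK_OUT;		/* clock low again */
	writel(local, &regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
	mb();

	return bit;
}
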
 /*
  * Local variables:
- * compile-command: "gcc -D__KERNEL__ -D__SMP__ -DMODULE -I/data/home/jes/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include /data/home/jes/linux/include/linux/modversions.h   -c -o acenic.o acenic.c"
+ * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h   -c -o acenic.o acenic.c"
  * End:
  */
