patch-2.3.27 linux/fs/buffer.c

diff -u --recursive --new-file v2.3.26/linux/fs/buffer.c linux/fs/buffer.c
@@ -91,7 +91,7 @@
 };
 static struct bh_free_head free_list[NR_SIZES];
 
-static kmem_cache_t *bh_cachep;
+kmem_cache_t *bh_cachep;
 
 static int grow_buffers(int size);
 
@@ -130,8 +130,6 @@
 int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   1*HZ,   1*HZ, 1, 1};
 int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,600*HZ, 6000*HZ, 6000*HZ, 2047, 5};
 
-void wakeup_bdflush(int);
-
 /*
  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  * and getting rid of the cli-sti pairs. The wait-queue routines still
@@ -707,7 +705,7 @@
 	mark_buffer_uptodate(bh, uptodate);
 
 	/* This is a temporary buffer used for page I/O. */
-	page = mem_map + MAP_NR(bh->b_data);
+	page = bh->b_page;
 
 	if (!uptodate)
 		SetPageError(page);
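This hunk is part of the highmem groundwork: a page in high memory has no permanent kernel virtual address, so the struct page can no longer be recovered from b_data with mem_map + MAP_NR(); the page pointer now travels in the buffer head itself. The two idioms side by side (a sketch restating the hunk, not new code):

	/* Old: derive the page from a kernel virtual address --
	 * only valid when b_data is a real lowmem address. */
	struct page *page = mem_map + MAP_NR(bh->b_data);

	/* New: the buffer head carries its page directly, which
	 * also works for pages that live in high memory. */
	struct page *page = bh->b_page;
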
@@ -760,7 +758,6 @@
 	return;
 }
 
-
 /*
  * Ok, this is getblk, and it isn't very clear, again to hinder
  * race-conditions. Most of the code is seldom used, (ie repeating),
@@ -823,7 +820,7 @@
 	unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;
 
 	dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT;
-	tot = nr_lru_pages + nr_free_pages - nr_free_highpages;
+	tot = nr_free_buffer_pages();
 	hard_dirty_limit = tot * bdf_prm.b_un.nfract / 100;
 	soft_dirty_limit = hard_dirty_limit >> 1;
 
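The dirty-buffer accounting now asks nr_free_buffer_pages() for the pool size instead of summing LRU and free pages by hand. The limits are plain percentages of that pool; a worked example (the nfract value of 40 is an assumption for illustration, not necessarily the kernel's default):

	/* Assuming nr_free_buffer_pages() == 10000 and nfract == 40: */
	tot = 10000;
	hard_dirty_limit = tot * 40 / 100;        /* 4000 pages */
	soft_dirty_limit = hard_dirty_limit >> 1; /* 2000 pages */
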
@@ -1092,6 +1089,20 @@
 	return NULL;
 }
 
+void set_bh_page (struct buffer_head *bh, struct page *page, unsigned int offset)
+{
+	bh->b_page = page;
+	if (offset >= PAGE_SIZE)
+		BUG();
+	if (PageHighMem(page))
+		/*
+		 * This catches illegal uses and preserves the offset:
+		 */
+		bh->b_data = (char *)(0 + offset);
+	else
+		bh->b_data = (char *)(page_address(page) + offset);
+}
+
 /*
  * Create the appropriate buffers when given a page for data area and
  * the size of each buffer.. Use the bh->b_this_page linked list to
@@ -1101,7 +1112,7 @@
  * from ordinary buffer allocations, and only async requests are allowed
  * to sleep waiting for buffer heads. 
  */
-static struct buffer_head * create_buffers(unsigned long page, unsigned long size, int async)
+static struct buffer_head * create_buffers(struct page * page, unsigned long size, int async)
 {
 	struct buffer_head *bh, *head;
 	long offset;
@@ -1124,7 +1135,8 @@
 		atomic_set(&bh->b_count, 0);
 		bh->b_size = size;
 
-		bh->b_data = (char *) (page+offset);
+		set_bh_page(bh, page, offset);
+
 		bh->b_list = BUF_CLEAN;
 		bh->b_end_io = end_buffer_io_bad;
 	}
@@ -1183,7 +1195,7 @@
 	 * They don't show up in the buffer hash table, but they *are*
 	 * registered in page->buffers.
 	 */
-	head = create_buffers(page_address(page), size, 1);
+	head = create_buffers(page, size, 1);
 	if (page->buffers)
 		BUG();
 	if (!head)
@@ -1273,7 +1285,7 @@
 {
 	struct buffer_head *bh, *head, *tail;
 
-	head = create_buffers(page_address(page), blocksize, 1);
+	head = create_buffers(page, blocksize, 1);
 	if (page->buffers)
 		BUG();
 
@@ -1377,10 +1389,11 @@
 	unsigned long start_offset, start_bytes, end_bytes;
 	unsigned long bbits, blocks, i, len;
 	struct buffer_head *bh, *head;
-	char * target_buf;
+	char *target_buf, *kaddr;
 	int need_balance_dirty;
 
-	target_buf = (char *)page_address(page) + offset;
+	kaddr = (char *)kmap(page);
+	target_buf = kaddr + offset;
 
 	if (!PageLocked(page))
 		BUG();
@@ -1445,7 +1458,7 @@
 
 		if (!buffer_uptodate(bh) && (start_offset || (end_bytes && (i == end_block)))) {
 			if (buffer_new(bh)) {
-				memset(bh->b_data, 0, bh->b_size);
+				memset(kaddr + i*blocksize, 0, blocksize);
 			} else {
 				ll_rw_block(READ, 1, &bh);
 				wait_on_buffer(bh);
@@ -1463,6 +1476,10 @@
 			len = end_bytes;
 			end_bytes = 0;
 		}
+		if (target_buf >= kaddr + PAGE_SIZE)
+			BUG();
+		if (target_buf+len-1 >= kaddr + PAGE_SIZE)
+			BUG();
 		err = copy_from_user(target_buf, buf, len);
 		target_buf += len;
 		buf += len;
@@ -1511,9 +1528,11 @@
 	 */
 	if (!partial)
 		SetPageUptodate(page);
+	kunmap(page);
 	return bytes;
 out:
 	ClearPageUptodate(page);
+	kunmap(page);
 	return err;
 }
 
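The write path above now follows the standard highmem discipline: kmap() the page once up front, do all byte access through the returned address, and kunmap() on every exit, the error path included; a missed kunmap() leaks a scarce kmap slot. The skeleton of the pattern, with illustrative names rather than the exact function above:

	char *kaddr = (char *) kmap(page);
	if (copy_from_user(kaddr + offset, buf, len)) {
		kunmap(page);		/* the error path unmaps too */
		return -EFAULT;
	}
	kunmap(page);
	return len;
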
@@ -1714,8 +1733,6 @@
  * for them to complete.  Clean up the buffer_heads afterwards.  
  */
 
-#define dprintk(x...) 
-
 static int do_kio(struct kiobuf *kiobuf,
 		  int rw, int nr, struct buffer_head *bh[], int size)
 {
@@ -1726,8 +1743,6 @@
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	dprintk ("do_kio start %d\n", rw);
-
 	if (rw == WRITE)
 		rw = WRITERAW;
 	atomic_add(nr, &kiobuf->io_count);
@@ -1753,8 +1768,6 @@
 	
 	spin_unlock(&unused_list_lock);
 
-	dprintk ("do_kio end %d %d\n", iosize, err);
-	
 	if (iosize)
 		return iosize;
 	if (kiobuf->errno)
@@ -1808,12 +1821,6 @@
 			panic("brw_kiovec: iobuf not initialised");
 	}
 
-	/* DEBUG */
-#if 0
-	return iobuf->length;
-#endif
-	dprintk ("brw_kiovec: start\n");
-	
 	/* 
 	 * OK to walk down the iovec doing page IO on each page we find. 
 	 */
@@ -1822,7 +1829,6 @@
 		iobuf = iovec[i];
 		offset = iobuf->offset;
 		length = iobuf->length;
-		dprintk ("iobuf %d %d %d\n", offset, length, size);
 
 		for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
 			map  = iobuf->maplist[pageind];
@@ -1842,7 +1848,7 @@
 				
 				tmp->b_dev = B_FREE;
 				tmp->b_size = size;
-				tmp->b_data = (char *) (page + offset);
+				set_bh_page(tmp, map, offset);
 				tmp->b_this_page = tmp;
 
 				init_buffer(tmp, end_buffer_io_kiobuf, NULL);
@@ -1856,8 +1862,6 @@
 					set_bit(BH_Dirty, &tmp->b_state);
 				}
 
-				dprintk ("buffer %d (%d) at %p\n", 
-					 bhind, tmp->b_blocknr, tmp->b_data);
 				bh[bhind++] = tmp;
 				length -= size;
 				offset += size;
@@ -1892,7 +1896,6 @@
 	}
 
  finished:
-	dprintk ("brw_kiovec: end (%d, %d)\n", transferred, err);
 	if (transferred)
 		return transferred;
 	return err;
@@ -2002,7 +2005,8 @@
 	unsigned long iblock;
 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
 	unsigned int blocksize, blocks;
-	int nr;
+	unsigned long kaddr = 0;
+	int nr, i;
 
 	if (!PageLocked(page))
 		PAGE_BUG(page);
@@ -2015,6 +2019,7 @@
 	iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 	bh = head;
 	nr = 0;
+	i = 0;
 
 	do {
 		if (buffer_uptodate(bh))
@@ -2023,7 +2028,9 @@
 		if (!buffer_mapped(bh)) {
 			inode->i_op->get_block(inode, iblock, bh, 0);
 			if (!buffer_mapped(bh)) {
-				memset(bh->b_data, 0, blocksize);
+				if (!kaddr)
+					kaddr = kmap(page);
+				memset((char *)(kaddr + i*blocksize), 0, blocksize);
 				set_bit(BH_Uptodate, &bh->b_state);
 				continue;
 			}
@@ -2033,7 +2040,7 @@
 		atomic_inc(&bh->b_count);
 		arr[nr] = bh;
 		nr++;
-	} while (iblock++, (bh = bh->b_this_page) != head);
+	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	++current->maj_flt;
 	if (nr) {
@@ -2048,6 +2055,8 @@
 		SetPageUptodate(page);
 		UnlockPage(page);
 	}
+	if (kaddr)
+		kunmap(page);
 	return 0;
 }
 
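block_read_full_page() only touches the page contents when it finds an unmapped block (a hole) that must be zero-filled, so the mapping is made lazily: kaddr starts at 0, kmap() runs on first use, and the matching kunmap() at the end fires only if a mapping was actually made. The new i counter tracks each block's position within the page. The pattern, condensed into a hypothetical helper for illustration:

	/* Zero the i-th block of a page, mapping the page only on
	 * first use.  *kaddr must start out 0; the caller does
	 * kunmap(page) at the end iff *kaddr ended up nonzero. */
	static void zero_hole_block(struct page *page, unsigned long *kaddr,
				    int i, unsigned int blocksize)
	{
		if (!*kaddr)
			*kaddr = kmap(page);
		memset((char *)(*kaddr + i*blocksize), 0, blocksize);
	}
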
@@ -2057,8 +2066,7 @@
  */
 static int grow_buffers(int size)
 {
-	unsigned long page;
-	struct page * page_map;
+	struct page * page;
 	struct buffer_head *bh, *tmp;
 	struct buffer_head * insert_point;
 	int isize;
@@ -2068,8 +2076,9 @@
 		return 0;
 	}
 
-	if (!(page = __get_free_page(GFP_BUFFER)))
-		return 0;
+	page = alloc_page(GFP_BUFFER);
+	if (!page)
+		goto out;
 	bh = create_buffers(page, size, 0);
 	if (!bh)
 		goto no_buffer_head;
@@ -2099,14 +2108,14 @@
 	free_list[isize].list = bh;
 	spin_unlock(&free_list[isize].lock);
 
-	page_map = mem_map + MAP_NR(page);
-	page_map->buffers = bh;
-	lru_cache_add(page_map);
+	page->buffers = bh;
+	lru_cache_add(page);
 	atomic_inc(&buffermem_pages);
 	return 1;
 
 no_buffer_head:
-	free_page(page);
+	__free_page(page);
+out:
 	return 0;
 }
 
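grow_buffers() is converted to the struct page interface in the same spirit: alloc_page() replaces __get_free_page(), the page pointer feeds create_buffers() and lru_cache_add() directly, and the failure path frees with __free_page() rather than by virtual address. The contrast in sketch form:

	/* Old: virtual-address interface. */
	unsigned long addr = __get_free_page(GFP_BUFFER);
	/* ... attach buffers keyed on the address ... */
	free_page(addr);

	/* New: struct-page interface, ready for a buffer cache
	 * whose pages need not have a permanent virtual address. */
	struct page *page = alloc_page(GFP_BUFFER);
	/* ... page->buffers = bh; lru_cache_add(page); ... */
	__free_page(page);
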
