From: Manfred Spraul
Date: Wed, 12 Jul 2000
Subject: Re: [PATCH] Additions to Manfred patch to ncpfs
I found another minor problem:

The old code avoided building the wait table when the timeout was 0;
now we always build the wait table.
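
Skipping the table is safe because every ->poll() method registers
through poll_wait(), which (in include/linux/poll.h) already tolerates
a NULL table, so passing NULL when timeout == 0 turns the call into a
pure readiness query with no wait queue bookkeeping:

static inline void poll_wait(struct file * filp,
			     wait_queue_head_t * wait_address, poll_table *p)
{
	if (p)
		__pollwait(filp, wait_address, p);
}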

Below is an update to the memory allocations in fs/select.c:

* do not build the wait table if timeout == 0.
* store the first 8 poll_table_entries within the poll_table itself.
* sys_select: if select is called for <= 256 bits, store the bit sets
on the stack instead of calling kmalloc().
* sys_poll: replace the page-sized arrays with a linked list, and begin
that list on the stack (the first 24 entries); see the sketch after
this list.
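
To make the sys_poll structure concrete, here is a minimal standalone
sketch of the same pattern in plain userspace C (the names and chunk
sizes are illustrative, not taken from the patch): the list head and
the first chunk of entries live on the caller's stack, and only
overflow chunks come from the allocator, so the common case of a few
descriptors never allocates at all.

#include <stdio.h>
#include <stdlib.h>

#define INLINE_COUNT 24		/* entries kept on the stack */
#define PER_NODE     64		/* entries per heap-allocated chunk */

struct node {
	struct node *next;
	int len;	/* 'len' payload items follow this header */
};

static struct node *append_chunk(struct node *tail, int len)
{
	/* header and payload in a single allocation, as in the patch */
	struct node *nn = malloc(sizeof(*nn) + len * sizeof(int));
	if (!nn)
		return NULL;
	nn->next = NULL;
	nn->len = len;
	tail->next = nn;
	return nn;
}

int main(void)
{
	struct {
		struct node head;
		int entries[INLINE_COUNT];
	} list;				/* first chunk is stack memory */
	struct node *tail = &list.head, *walk, *tmp;
	int total = 100, remaining;

	list.head.next = NULL;
	list.head.len = total < INLINE_COUNT ? total : INLINE_COUNT;
	remaining = total - list.head.len;

	while (remaining > 0) {		/* overflow spills to the heap */
		int len = remaining > PER_NODE ? PER_NODE : remaining;
		tail = append_chunk(tail, len);
		if (!tail)
			return 1;
		remaining -= len;
	}

	for (walk = &list.head; walk != NULL; walk = walk->next)
		printf("chunk of %d entries\n", walk->len);

	/* free only the heap chunks; the head is stack memory */
	for (walk = list.head.next; walk != NULL; walk = tmp) {
		tmp = walk->next;
		free(walk);
	}
	return 0;
}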

This means that sys_poll and sys_select each place around 450 bytes on
the stack - IMHO acceptable.
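
The sys_select half is the simplest variant of the trick; a short
userspace sketch (the helper names are mine, not the patch's): one
buffer holds all six bit-set copies, lives on the stack whenever
6 * size fits into the 192-byte inline array, and is freed only if it
did not come from the stack.

#include <stdlib.h>
#include <string.h>

static char *get_buffer(size_t needed, char *inline_buf,
			size_t inline_size)
{
	if (needed <= inline_size)
		return inline_buf;	/* common case: no allocation */
	return malloc(needed);
}

static void put_buffer(char *buf, char *inline_buf)
{
	if (buf != inline_buf)		/* mirrors the kfree() guard */
		free(buf);
}

static int example(size_t size)
{
	char inline_bits[192];		/* 6 * 32 bytes, as in the patch */
	char *bits = get_buffer(6 * size, inline_bits,
				sizeof(inline_bits));
	if (!bits)
		return -1;		/* -ENOMEM in the kernel version */
	memset(bits, 0, 6 * size);
	/* ... in/out/ex plus the three result sets live here ... */
	put_buffer(bits, inline_bits);
	return 0;
}

int main(void)
{
	return example(16) || example(1024);	/* stack and heap paths */
}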

Result: poll() on a single file descriptor drops from ~3200 CPU ticks
to ~1000 CPU ticks.

The patch was tested against test4-pre4.
Petr, could you double-check that I didn't break ncpfs?

--
Manfred

<<<<<<<<<<
// $Header$
// Kernel Version:
// VERSION = 2
// PATCHLEVEL = 4
// SUBLEVEL = 0
// EXTRAVERSION = -test4
--- 2.4/fs/select.c Wed Jul 12 20:06:25 2000
+++ build-2.4/fs/select.c Wed Jul 12 20:54:35 2000
@@ -40,8 +40,15 @@
void poll_freewait(poll_table* pt)
{
struct poll_table_page * p = pt->table;
+ struct poll_table_entry * entry;
+ entry = pt->internal + pt->nr;
+ while(pt->nr > 0) {
+ pt->nr--;
+ entry--;
+ remove_wait_queue(entry->wait_address,&entry->wait);
+ fput(entry->filp);
+ }
while (p) {
- struct poll_table_entry * entry;
struct poll_table_page *old;

entry = p->entry + p->nr;
@@ -53,41 +60,44 @@
}
old = p;
p = p->next;
- free_page((unsigned long) old);
+ kfree(old);
}
}

void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
- struct poll_table_page *table = p->table;
-
- if (!table || table->nr >= __MAX_POLL_TABLE_ENTRIES) {
- struct poll_table_page *new_table;
+ struct poll_table_entry * entry;

- new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
- if (!new_table) {
- p->error = -ENOMEM;
- __set_current_state(TASK_RUNNING);
- return;
- }
- new_table->nr = 0;
- new_table->entry = (struct poll_table_entry *)(new_table + 1);
- new_table->next = table;
- p->table = new_table;
- table = new_table;
- }
-
- /* Add a new entry */
- {
- struct poll_table_entry * entry;
+ if(p->nr < POLL_TABLE_INTERNAL) {
+ entry = p->internal + p->nr;
+ p->nr++;
+ } else {
+ struct poll_table_page *table = p->table;
+ if (!table || table->nr >= __MAX_POLL_TABLE_ENTRIES) {
+ struct poll_table_page *new_table;
+
+ new_table = (struct poll_table_page *) kmalloc(PAGE_SIZE,GFP_KERNEL);
+ if (!new_table) {
+ p->error = -ENOMEM;
+ __set_current_state(TASK_RUNNING);
+ return;
+ }
+ new_table->nr = 0;
+ new_table->entry = (struct poll_table_entry *)(new_table + 1);
+ new_table->next = table;
+ p->table = new_table;
+ table = new_table;
+ }
entry = table->entry + table->nr;
table->nr++;
- get_file(filp);
- entry->filp = filp;
- entry->wait_address = wait_address;
- init_waitqueue_entry(&entry->wait, current);
- add_wait_queue(wait_address,&entry->wait);
}
+
+ /* Add a new entry */
+ get_file(filp);
+ entry->filp = filp;
+ entry->wait_address = wait_address;
+ init_waitqueue_entry(&entry->wait, current);
+ add_wait_queue(wait_address,&entry->wait);
}

#define __IN(fds, n) (fds->in + n)
@@ -163,7 +173,10 @@
n = retval;

poll_initwait(&table);
- wait = &table;
+ if(__timeout)
+ wait = &table;
+ else
+ wait = NULL;
retval = 0;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -234,6 +247,7 @@
sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
{
fd_set_bits fds;
+ char inline_bits[192];
char *bits;
long timeout;
int ret, size;
@@ -271,9 +285,13 @@
*/
ret = -ENOMEM;
size = FDS_BYTES(n);
- bits = kmalloc(6 * size, GFP_KERNEL);
- if (!bits)
- goto out_nofds;
+ if(size <= sizeof(inline_bits)/6) {
+ bits = inline_bits;
+ } else {
+ bits = kmalloc(6 * size, GFP_KERNEL);
+ if (!bits)
+ goto out_nofds;
+ }
fds.in = (unsigned long *) bits;
fds.out = (unsigned long *) (bits + size);
fds.ex = (unsigned long *) (bits + 2*size);
@@ -316,12 +334,18 @@
set_fd_set(n, exp, fds.res_ex);

out:
- kfree(bits);
+ if(bits != inline_bits)
+ kfree(bits);
out_nofds:
return ret;
}

-#define POLLFD_PER_PAGE ((PAGE_SIZE) / sizeof(struct pollfd))
+struct poll_list {
+ struct poll_list *next;
+ int len;
+};
+
+#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

static void do_pollfd(unsigned int num, struct pollfd * fdpage,
poll_table ** pwait, int *count)
@@ -355,38 +379,44 @@
}
}

-static int do_poll(unsigned int nfds, unsigned int nchunks, unsigned int nleft,
- struct pollfd *fds[], poll_table *wait, long timeout)
+
+static int do_poll(int nfds, struct poll_list *list,
+ poll_table *wait, long timeout)
{
int count = 0;
poll_table* pt = wait;
-
+
for (;;) {
- unsigned int i;
-
+ struct poll_list* walk;
set_current_state(TASK_INTERRUPTIBLE);
- for (i=0; i < nchunks; i++)
- do_pollfd(POLLFD_PER_PAGE, fds[i], &pt, &count);
- if (nleft)
- do_pollfd(nleft, fds[nchunks], &pt, &count);
+ walk = list;
+ while(walk != NULL) {
+ do_pollfd( walk->len, (struct pollfd*)(walk+1), &pt, &count);
+ walk = walk->next;
+ }
pt = NULL;
if (count || !timeout || signal_pending(current))
break;
- if(wait->error) {
+ if(wait->error)
return wait->error;
- }
+
timeout = schedule_timeout(timeout);
}
current->state = TASK_RUNNING;
return count;
}

+#define INLINE_POLL_COUNT 24
asmlinkage long sys_poll(struct pollfd * ufds, unsigned int nfds, long timeout)
{
- int i, j, fdcount, err;
- struct pollfd **fds;
+ int fdcount, err;
+ unsigned int i;
+ struct poll_list *pollwalk;
+ struct {
+ struct poll_list head;
+ struct pollfd entries[INLINE_POLL_COUNT];
+ } polllist;
poll_table table;
- int nchunks, nleft;

/* Do a sanity check on nfds ... */
if (nfds > current->files->max_fds)
@@ -402,64 +432,65 @@

poll_initwait(&table);
err = -ENOMEM;
-
- fds = NULL;
- if (nfds != 0) {
- fds = (struct pollfd **)kmalloc(
- (1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *),
- GFP_KERNEL);
- if (fds == NULL)
- goto out;
- }
-
- nchunks = 0;
- nleft = nfds;
- while (nleft > POLLFD_PER_PAGE) { /* allocate complete PAGE_SIZE chunks */
- fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
- if (fds[nchunks] == NULL)
+ polllist.head.next = NULL;
+ if(nfds <= INLINE_POLL_COUNT)
+ polllist.head.len = nfds;
+ else
+ polllist.head.len = INLINE_POLL_COUNT;
+ pollwalk = &polllist.head;
+ i = nfds;
+ err = -ENOMEM;
+ goto start;
+ while(i!=0) {
+ struct poll_list *pp;
+ pp = kmalloc(sizeof(struct poll_list)+
+ sizeof(struct pollfd)*
+ (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i),
+ GFP_KERNEL);
+ if(pp==NULL)
goto out_fds;
- nchunks++;
- nleft -= POLLFD_PER_PAGE;
- }
- if (nleft) { /* allocate last PAGE_SIZE chunk, only nleft elements used */
- fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
- if (fds[nchunks] == NULL)
+ pp->next=NULL;
+ pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i);
+ pollwalk->next = pp;
+ pollwalk = pp;
+start:
+ if (copy_from_user(pollwalk+1, ufds + nfds-i,
+ sizeof(struct pollfd)*pollwalk->len)) {
+ err = -EFAULT;
goto out_fds;
+ }
+ i -= pollwalk->len;
}
+
+ fdcount = do_poll(nfds, &polllist.head,
+ timeout!=0?&table:NULL, timeout);

+ /* OK, now copy the revents fields back to user space. */
+ i = nfds;
+ pollwalk = &polllist.head;
err = -EFAULT;
- for (i=0; i < nchunks; i++)
- if (copy_from_user(fds[i], ufds + i*POLLFD_PER_PAGE, PAGE_SIZE))
- goto out_fds1;
- if (nleft) {
- if (copy_from_user(fds[nchunks], ufds + nchunks*POLLFD_PER_PAGE,
- nleft * sizeof(struct pollfd)))
- goto out_fds1;
+ while(pollwalk != NULL) {
+ struct pollfd * fds = (struct pollfd*)(pollwalk+1);
+ int j;
+
+ for (j=0; j < pollwalk->len; j++, ufds++) {
+ if(__put_user(fds[j].revents, &ufds->revents))
+ goto out_fds;
+ }
+ i -= pollwalk->len;
+ pollwalk = pollwalk->next;
}
-
- fdcount = do_poll(nfds, nchunks, nleft, fds, &table, timeout);
-
- /* OK, now copy the revents fields back to user space. */
- for(i=0; i < nchunks; i++)
- for (j=0; j < POLLFD_PER_PAGE; j++, ufds++)
- __put_user((fds[i] + j)->revents, &ufds->revents);
- if (nleft)
- for (j=0; j < nleft; j++, ufds++)
- __put_user((fds[nchunks] + j)->revents, &ufds->revents);
-
err = fdcount;
if (!fdcount && signal_pending(current))
err = -EINTR;

-out_fds1:
- if (nleft)
- free_page((unsigned long)(fds[nchunks]));
out_fds:
- for (i=0; i < nchunks; i++)
- free_page((unsigned long)(fds[i]));
- if (nfds != 0)
- kfree(fds);
-out:
+ pollwalk = polllist.head.next;
+ while(pollwalk!=NULL) {
+ struct poll_list *pp = pollwalk->next;
+ kfree(pollwalk);
+ pollwalk = pp;
+ }
poll_freewait(&table);
return err;
}
--- 2.4/include/linux/poll.h Wed Jul 12 20:06:25 2000
+++ build-2.4/include/linux/poll.h Wed Jul 12 20:16:52 2000
@@ -23,9 +23,12 @@
struct poll_table_entry * entry;
};

+#define POLL_TABLE_INTERNAL 8
typedef struct poll_table_struct {
int error;
+ int nr;
struct poll_table_page * table;
+ struct poll_table_entry internal[POLL_TABLE_INTERNAL];
} poll_table;

#define __MAX_POLL_TABLE_ENTRIES ((PAGE_SIZE - sizeof (struct poll_table_page)) / sizeof (struct poll_table_entry))
@@ -41,6 +44,7 @@
static inline void poll_initwait(poll_table* pt)
{
pt->error = 0;
+ pt->nr = 0;
pt->table = NULL;
}
extern void poll_freewait(poll_table* pt);




