Date: 7 Nov 1999
From: Manfred Spraul <manfreds@colorfullife.com>
Subject: [patch] ipc updates

My ipc patch is finished; the important changes are:

    * sysctl added for all important limits (msg and sem)
    * most implementation limits got removed or vastly increased
    * new ipc helper functions: ipc/sem.c is now 2 lines shorter although new
    features were added :-)

    * fixes the DoS attack in ipc/msg found by Scott Maxwell
    * fixes a stupid bug in sem_exit() (AFAICS, it could only be triggered
    by oops'es)
    * compatibility fix: IPC_RMID, IPC_STAT, ... failed if semnum < 0, but they
    must ignore semnum.
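
    To illustrate the sysctl changes above, a minimal userspace sketch
    (assuming the /proc/sys/kernel/sem entry added below; the four integers
    follow sem_ctls[], i.e. SEMMSL, SEMMNS, SEMOPM, SEMMNI, and the values
    written here are only examples):

    /* read the current SysV semaphore limits and raise them */
    #include <stdio.h>

    int main(void)
    {
            int semmsl, semmns, semopm, semmni;
            FILE *f = fopen("/proc/sys/kernel/sem", "r");

            if (f == NULL) {
                    perror("/proc/sys/kernel/sem");
                    return 1;
            }
            if (fscanf(f, "%d %d %d %d",
                       &semmsl, &semmns, &semopm, &semmni) != 4) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("semmsl=%d semmns=%d semopm=%d semmni=%d\n",
                   semmsl, semmns, semopm, semmni);

            /* raise the per-id and system-wide limits; needs root */
            f = fopen("/proc/sys/kernel/sem", "w");
            if (f == NULL) {
                    perror("/proc/sys/kernel/sem");
                    return 1;
            }
            fprintf(f, "%d %d %d %d\n", 512, 32000, 100, 256);
            fclose(f);
            return 0;
    }

    Since the fixed-size semary[]/msg_que[] arrays are gone, larger values take
    effect immediately: grow_ary() in ipc/util.c grows the id array on demand,
    up to IPCMNI.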

    Could you please test it?
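
    If it helps with testing the removed message-size limit, a rough test-case
    sketch (assuming the new kernel.msgmax sysctl; needs root to raise the
    limit) that sends one message larger than the old 4056-byte MSGMAX:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    struct big_msg {
            long mtype;
            char mtext[16384];      /* larger than the old 4056-byte MSGMAX */
    };

    int main(void)
    {
            struct big_msg m;
            int id;
            FILE *f;

            /* raise the per-message size limit via the new sysctl */
            f = fopen("/proc/sys/kernel/msgmax", "w");
            if (f) {
                    fprintf(f, "%d\n", (int)sizeof(m.mtext));
                    fclose(f);
            }

            id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
            if (id < 0) {
                    perror("msgget");
                    return 1;
            }
            m.mtype = 1;
            memset(m.mtext, 'x', sizeof(m.mtext));
            if (msgsnd(id, &m, sizeof(m.mtext), IPC_NOWAIT) < 0)
                    perror("msgsnd");  /* -EINVAL on a pre-patch kernel */
            else
                    printf("sent %d bytes in one message\n",
                           (int)sizeof(m.mtext));
            msgctl(id, IPC_RMID, NULL);
            return 0;
    }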

    --
    Manfred

    <<<<<<<<<<<<<<<
    // $Header: /pub/cvs/ms/patches/patch-ipcmerge,v 1.15 1999/11/07 12:32:52 manfreds Exp $
    // Kernel Version:
    // VERSION = 2
    // PATCHLEVEL = 3
    // SUBLEVEL = 26
    // EXTRAVERSION =
    --- 2.3/ipc/sem.c Sun Nov 7 10:43:28 1999
    +++ build-2.3/ipc/sem.c Sun Nov 7 11:33:48 1999
    @@ -51,7 +51,8 @@
    *
    * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
    *
    - * SMP-threaded (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
    + * SMP-threaded, sysctl's added
    + * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
    */

    #include <linux/config.h>
    @@ -59,120 +60,91 @@
    #include <linux/spinlock.h>
    #include <linux/init.h>
    #include <linux/proc_fs.h>
    -
    #include <asm/uaccess.h>
    +#include "util.h"
    +
    +
    +#define sem_lock(id) ((struct semid_ds*)ipc_lock(&sem_ids,id))
    +#define sem_unlock(id) ipc_unlock(&sem_ids,id)
    +#define sem_rmid(id) ((struct semid_ds*)ipc_rmid(&sem_ids,id))
    +#define sem_checkid(sma, semid) \
    + ipc_checkid(&sem_ids,&sma->sem_perm,semid)
    +#define sem_buildid(id, seq) \
    + ipc_buildid(&sem_ids, id, seq)
    +static struct ipc_ids sem_ids;

    -extern int ipcperms (struct ipc_perm *ipcp, short semflg);
    static int newary (key_t, int, int);
    -static int findkey (key_t key);
    static void freeary (int id);
    #ifdef CONFIG_PROC_FS
    static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
    #endif

    -struct semid_ary
    -{
    - spinlock_t lock;
    - struct semid_ds* s;
    -};
    -
    -static struct semid_ary semary[SEMMNI];
    -
    -static DECLARE_MUTEX(sem_lock);
    -static int max_semid = 0;
    -static int used_sems = 0;
    -static int used_semids = 0;
    -
    -static unsigned short sem_seq = 0;
    +#define SEMMSL_FAST 256 /* 512 bytes on stack */
    +#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */

    -/* anti-deadlock ordering:
    - * sem_lock < semary[].lock
    +/*
    * linked list protection:
    * sem_undo.id_next,
    * semid_ds.sem_pending{,last},
    - * semid_ds.sem_undo: semary[].lock for read/write
    + * semid_ds.sem_undo: sem_lock() for read/write
    * sem_undo.proc_next: only "current" is allowed to read/write that field.
    *
    */

    +int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
    +#define sc_semmsl (sem_ctls[0])
    +#define sc_semmns (sem_ctls[1])
    +#define sc_semopm (sem_ctls[2])
    +#define sc_semmni (sem_ctls[3])
    +
    +static int used_sems = 0;
    +
    void __init sem_init (void)
    {
    - int i;
    + used_sems = 0;
    + ipc_init_ids(&sem_ids,sc_semmni);

    - used_sems = used_semids = max_semid = sem_seq = 0;
    - for (i = 0; i < SEMMNI; i++) {
    - semary[i].lock = SPIN_LOCK_UNLOCKED;
    - semary[i].s = NULL;
    - }
    #ifdef CONFIG_PROC_FS
    create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
    #endif
    - return;
    -}
    -
    -static int findkey (key_t key)
    -{
    - int id;
    - struct semid_ds *sma;
    -
    - for (id = 0; id <= max_semid; id++) {
    - sma = semary[id].s;
    - if(sma==NULL)
    - continue;
    -
    - if (key == sma->sem_perm.key)
    - return id;
    - }
    - return -1;
    }

    static int newary (key_t key, int nsems, int semflg)
    {
    int id;
    struct semid_ds *sma;
    - struct ipc_perm *ipcp;
    int size;

    if (!nsems)
    return -EINVAL;
    - if (used_sems + nsems > SEMMNS)
    + if (used_sems + nsems > sc_semmns)
    return -ENOSPC;
    - for (id = 0; id < SEMMNI; id++) {
    - if(semary[id].s == NULL)
    - goto found;
    - }
    - return -ENOSPC;
    -found:
    +
    size = sizeof (*sma) + nsems * sizeof (struct sem);
    - used_sems += nsems;
    - sma = (struct semid_ds *) kmalloc (size, GFP_KERNEL);
    + sma = (struct semid_ds *) ipc_alloc(size);
    if (!sma) {
    - used_sems -= nsems;
    return -ENOMEM;
    }
    memset (sma, 0, size);
    + id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
    + if(id == -1) {
    + ipc_free(sma, size);
    + return -ENOSPC;
    + }
    + used_sems += nsems;
    +
    + sma->sem_perm.mode = (semflg & S_IRWXUGO);
    + sma->sem_perm.key = key;
    +
    sma->sem_base = (struct sem *) &sma[1];
    - ipcp = &sma->sem_perm;
    - ipcp->mode = (semflg & S_IRWXUGO);
    - ipcp->key = key;
    - ipcp->cuid = ipcp->uid = current->euid;
    - ipcp->gid = ipcp->cgid = current->egid;
    - /* sma->sem_perm.seq*MSGMNI must be a positive integer.
    - * this limits MSGMNI to 32768
    - */
    - sma->sem_perm.seq = sem_seq++;
    /* sma->sem_pending = NULL; */
    sma->sem_pending_last = &sma->sem_pending;
    /* sma->undo = NULL; */
    sma->sem_nsems = nsems;
    sma->sem_ctime = CURRENT_TIME;
    - if (id > max_semid)
    - max_semid = id;
    - used_semids++;
    - spin_lock(&semary[id].lock);
    - semary[id].s = sma;
    - spin_unlock(&semary[id].lock);
    - return (unsigned int) sma->sem_perm.seq * SEMMNI + id;
    + sem_unlock(id);
    +
    + return sem_buildid(id, sma->sem_perm.seq);
    }

    asmlinkage long sys_semget (key_t key, int nsems, int semflg)
    @@ -180,13 +152,13 @@
    int id, err = -EINVAL;
    struct semid_ds *sma;

    - if (nsems < 0 || nsems > SEMMSL)
    + if (nsems < 0 || nsems > sc_semmsl)
    return -EINVAL;
    - down(&sem_lock);
    + down(&sem_ids.sem);

    if (key == IPC_PRIVATE) {
    err = newary(key, nsems, semflg);
    - } else if ((id = findkey (key)) == -1) { /* key not used */
    + } else if ((id = ipc_findkey(&sem_ids, key)) == -1) { /* key not used */
    if (!(semflg & IPC_CREAT))
    err = -ENOENT;
    else
    @@ -194,19 +166,46 @@
    } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
    err = -EEXIST;
    } else {
    - sma = semary[id].s;
    + sma = sem_lock(id);
    + if(sma==NULL)
    + BUG();
    if (nsems > sma->sem_nsems)
    err = -EINVAL;
    else if (ipcperms(&sma->sem_perm, semflg))
    err = -EACCES;
    else
    - err = (int) sma->sem_perm.seq * SEMMNI + id;
    + err = sem_buildid(id, sma->sem_perm.seq);
    + sem_unlock(id);
    }

    - up(&sem_lock);
    + up(&sem_ids.sem);
    return err;
    }

    +/* doesn't acquire the sem_lock on error! */
    +static int sem_revalidate(int semid, struct semid_ds* sma, int nsems, short flg)
    +{
    + struct semid_ds* smanew;
    +
    + smanew = sem_lock(semid);
    + if(smanew==NULL)
    + return -EIDRM;
    + if(smanew != sma)
    + goto out_EIDRM;
    + if(sem_checkid(sma,semid))
    + goto out_EIDRM;
    + if(sma->sem_nsems != nsems) {
    +out_EIDRM:
    + sem_unlock(semid);
    + return -EIDRM;
    + }
    +
    + if (ipcperms(&sma->sem_perm, flg)) {
    + sem_unlock(semid);
    + return -EACCES;
    + }
    + return 0;
    +}
    /* Manage the doubly linked list sma->sem_pending as a FIFO:
    * insert new queue elements at the tail sma->sem_pending_last.
    */
    @@ -387,15 +386,9 @@
    struct semid_ds *sma;
    struct sem_undo *un;
    struct sem_queue *q;
    + int size;

    - /* we own both locks, noone can get in */
    - sma = semary[id].s;
    - semary[id].s = NULL;
    -
    - used_sems -= sma->sem_nsems;
    - if (id == max_semid)
    - while (max_semid && (semary[--max_semid].s == NULL));
    - used_semids--;
    + sma = sem_rmid(id);

    /* Invalidate the existing undo structures for this semaphore set.
    * (They will be freed without any further action in sem_exit()
    @@ -410,40 +403,46 @@
    q->prev = NULL;
    wake_up_process(q->sleeper); /* doesn't sleep */
    }
    + sem_unlock(id);

    - kfree(sma);
    + used_sems -= sma->sem_nsems;
    + size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
    + ipc_free(sma, size);
    }

    int semctl_nolock(int semid, int semnum, int cmd, union semun arg)
    {
    int err = -EINVAL;
    - int lid = semid % SEMMNI;

    switch(cmd) {
    case IPC_INFO:
    case SEM_INFO:
    {
    struct seminfo seminfo;
    + int max_id;

    - seminfo.semmni = SEMMNI;
    - seminfo.semmns = SEMMNS;
    - seminfo.semmsl = SEMMSL;
    - seminfo.semopm = SEMOPM;
    + memset(&seminfo,0,sizeof(seminfo));
    + seminfo.semmni = sc_semmni;
    + seminfo.semmns = sc_semmns;
    + seminfo.semmsl = sc_semmsl;
    + seminfo.semopm = sc_semopm;
    seminfo.semvmx = SEMVMX;
    seminfo.semmnu = SEMMNU;
    seminfo.semmap = SEMMAP;
    seminfo.semume = SEMUME;
    - seminfo.semusz = SEMUSZ;
    - seminfo.semaem = SEMAEM;
    + down(&sem_ids.sem);
    if (cmd == SEM_INFO) {
    - down(&sem_lock);
    - seminfo.semusz = used_semids;
    + seminfo.semusz = sem_ids.in_use;
    seminfo.semaem = used_sems;
    - up(&sem_lock);
    + } else {
    + seminfo.semusz = SEMUSZ;
    + seminfo.semaem = SEMAEM;
    }
    + max_id = sem_ids.max_id;
    + up(&sem_ids.sem);
    if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
    return -EFAULT;
    - return max_semid;
    + return (max_id < 0) ? 0: max_id;
    }
    case SEM_STAT:
    {
    @@ -451,24 +450,24 @@
    struct semid_ds tbuf;
    int id;

    - if (semid > max_semid)
    + if(semid > sem_ids.size)
    + return -EINVAL;
    +
    + sma = sem_lock(semid);
    + if(sma == NULL)
    return -EINVAL;

    - spin_lock(&semary[lid].lock);
    - err = -EINVAL;
    - sma = semary[semid].s;
    - if (sma == NULL)
    - goto out_unlock;
    err = -EACCES;
    if (ipcperms (&sma->sem_perm, S_IRUGO))
    goto out_unlock;
    - id = (unsigned int) sma->sem_perm.seq * SEMMNI + semid;
    + id = sem_buildid(semid, sma->sem_perm.seq);
    +
    memset(&tbuf,0,sizeof(tbuf));
    tbuf.sem_perm = sma->sem_perm;
    tbuf.sem_otime = sma->sem_otime;
    tbuf.sem_ctime = sma->sem_ctime;
    tbuf.sem_nsems = sma->sem_nsems;
    - spin_unlock(&semary[lid].lock);
    + sem_unlock(semid);
    if (copy_to_user (arg.buf, &tbuf, sizeof(tbuf)))
    return -EFAULT;
    return id;
    @@ -478,99 +477,136 @@
    }
    return err;
    out_unlock:
    - spin_unlock(&semary[lid].lock);
    + sem_unlock(semid);
    return err;
    }

    -int semctl_locked_unlock(int semid, int semnum, int cmd, union semun arg)
    +int semctl_main(int semid, int semnum, int cmd, union semun arg)
    {
    struct semid_ds *sma;
    - struct semid_ds tbuf;
    + struct sem* curr;
    int err;
    - int lid = semid % SEMMNI;
    + ushort fast_sem_io[SEMMSL_FAST];
    + ushort* sem_io = fast_sem_io;
    + int nsems;
    +
    + sma = sem_lock(semid);
    + if(sma==NULL)
    + return -EINVAL;

    - sma = semary[lid].s;
    - err=-EINVAL;
    - if (sma == NULL)
    - goto out_unlock;
    err=-EIDRM;
    - if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
    + if (sem_checkid(sma,semid))
    goto out_unlock;

    err = -EACCES;
    - if (ipcperms(&sma->sem_perm, S_IRUGO))
    - goto out_unlock;
    -
    + if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
    + goto out_unlock;
    +
    + nsems = sma->sem_nsems;
    switch (cmd) {
    case GETALL:
    {
    ushort *array = arg.array;
    - ushort sem_io[SEMMSL];
    int i;
    - int nsems = sma->sem_nsems;
    +
    + if(nsems > SEMMSL_FAST) {
    + sem_unlock(semid);
    + sem_io = ipc_alloc(sizeof(ushort)*nsems);
    + if(sem_io == NULL)
    + return -ENOMEM;
    + err = sem_revalidate(semid, sma, nsems, S_IRUGO);
    + if(err)
    + goto out_free;
    + }

    for (i = 0; i < sma->sem_nsems; i++)
    sem_io[i] = sma->sem_base[i].semval;
    - spin_unlock(&semary[lid].lock);
    - if (copy_to_user (array, sem_io, nsems*sizeof(ushort)))
    - return -EFAULT;
    - return 0;
    + sem_unlock(semid);
    + err = 0;
    + if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
    + err = -EFAULT;
    + goto out_free;
    + }
    + case SETALL:
    + {
    + int i;
    + struct sem_undo *un;
    +
    + sem_unlock(semid);
    +
    + if(nsems > SEMMSL_FAST) {
    + sem_io = ipc_alloc(sizeof(ushort)*nsems);
    + if(sem_io == NULL)
    + return -ENOMEM;
    + }
    +
    + if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
    + err = -EFAULT;
    + goto out_free;
    + }
    +
    + for (i = 0; i < nsems; i++) {
    + if (sem_io[i] > SEMVMX) {
    + err = -ERANGE;
    + goto out_free;
    + }
    + }
    + err = sem_revalidate(semid, sma, nsems, S_IWUGO);
    + if(err)
    + goto out_free;
    +
    + for (i = 0; i < nsems; i++)
    + sma->sem_base[i].semval = sem_io[i];
    + for (un = sma->undo; un; un = un->id_next)
    + for (i = 0; i < nsems; i++)
    + un->semadj[i] = 0;
    + sma->sem_ctime = CURRENT_TIME;
    + /* maybe some queued-up processes were waiting for this */
    + update_queue(sma);
    + err = 0;
    + goto out_unlock;
    }
    case IPC_STAT:
    + {
    + struct semid_ds tbuf;
    memset(&tbuf,0,sizeof(tbuf));
    tbuf.sem_perm = sma->sem_perm;
    tbuf.sem_otime = sma->sem_otime;
    tbuf.sem_ctime = sma->sem_ctime;
    tbuf.sem_nsems = sma->sem_nsems;
    - spin_unlock(&semary[lid].lock);
    + sem_unlock(semid);
    if (copy_to_user (arg.buf, &tbuf, sizeof(tbuf)))
    return -EFAULT;
    return 0;
    -default:
    - err = -EINVAL;
    }
    -out_unlock:
    - spin_unlock(&semary[lid].lock);
    - return err;
    -
    -}
    -
    -int semctl_locked(int semid, int semnum, int cmd, union semun arg)
    -{
    - struct semid_ds *sma;
    - int lid = semid % SEMMNI;
    - struct sem *curr;
    -
    - sma = semary[lid].s;
    - if (sma == NULL)
    - return -EINVAL;
    -
    - if (ipcperms (&sma->sem_perm, (cmd==SETVAL)?S_IWUGO:S_IRUGO))
    - return -EACCES;
    -
    - if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
    - return -EIDRM;
    -
    - if (semnum >= sma->sem_nsems)
    - return -EINVAL;
    + /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
    + }
    + err = -EINVAL;
    + if(semnum < 0 || semnum >= nsems)
    + goto out_unlock;

    curr = &sma->sem_base[semnum];

    switch (cmd) {
    case GETVAL:
    - return curr->semval;
    + err = curr->semval;
    + goto out_unlock;
    case GETPID:
    - return curr->sempid & 0xffff;
    + err = curr->sempid & 0xffff;
    + goto out_unlock;
    case GETNCNT:
    - return count_semncnt(sma,semnum);
    + err = count_semncnt(sma,semnum);
    + goto out_unlock;
    case GETZCNT:
    - return count_semzcnt(sma,semnum);
    + err = count_semzcnt(sma,semnum);
    + goto out_unlock;
    case SETVAL:
    {
    int val = arg.val;
    struct sem_undo *un;
    + err = -ERANGE;
    if (val > SEMVMX || val < 0)
    - return -ERANGE;
    + goto out_unlock;

    for (un = sma->undo; un; un = un->id_next)
    un->semadj[semnum] = 0;
    @@ -578,17 +614,22 @@
    sma->sem_ctime = CURRENT_TIME;
    /* maybe some queued-up processes were waiting for this */
    update_queue(sma);
    - return 0;
    + err = 0;
    + goto out_unlock;
    }
    }
    - return -EINVAL;
    +out_unlock:
    + sem_unlock(semid);
    +out_free:
    + if(sem_io != fast_sem_io)
    + ipc_free(sem_io, sizeof(ushort)*nsems);
    + return err;
    }

    int semctl_down(int semid, int semnum, int cmd, union semun arg)
    {
    struct semid_ds *sma;
    int err;
    - int lid = semid % SEMMNI;
    struct semid_ds tbuf;
    struct ipc_perm *ipcp;

    @@ -596,66 +637,25 @@
    if(copy_from_user (&tbuf, arg.buf, sizeof (tbuf)))
    return -EFAULT;
    }
    - spin_lock(&semary[lid].lock);
    - sma = semary[lid].s;
    - err=-EINVAL;
    - if (sma == NULL)
    + sma = sem_lock(semid);
    + if(sma==NULL)
    + return -EINVAL;
    +
    + if (sem_checkid(sma,semid)) {
    + err=-EIDRM;
    goto out_unlock;
    + }
    ipcp = &sma->sem_perm;

    - if(cmd == SETALL) {
    - int i;
    - struct sem_undo *un;
    - unsigned int nsems;
    - ushort sem_io[SEMMSL];
    - /* SETALL doesn't belong into this
    - * group, but I need the semaphore
    - * for atomically reading nsems
    - * and changing the semaphore values
    - */
    - err=-EACCES;
    - if (ipcperms (ipcp, S_IWUGO))
    - goto out_unlock;
    - nsems=sma->sem_nsems;
    - spin_unlock(&semary[lid].lock);
    -
    - if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort)))
    - return -EFAULT;
    - for (i = 0; i < nsems; i++) {
    - if (sem_io[i] > SEMVMX) {
    - return -ERANGE;
    - }
    - }
    - /* we still own sem_lock, ie neither ownership
    - * nor permissions of the sem array could
    - * have changed. Just continue.
    - */
    - spin_lock(&semary[lid].lock);
    - for (i = 0; i < nsems; i++)
    - sma->sem_base[i].semval = sem_io[i];
    - for (un = sma->undo; un; un = un->id_next)
    - for (i = 0; i < nsems; i++)
    - un->semadj[i] = 0;
    - sma->sem_ctime = CURRENT_TIME;
    - /* maybe some queued-up processes were waiting for this */
    - update_queue(sma);
    - err = 0;
    - goto out_unlock;
    - }
    -
    if (current->euid != ipcp->cuid &&
    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
    err=-EPERM;
    goto out_unlock;
    }
    -
    - if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI) {
    - err=-EIDRM;
    - goto out_unlock;
    - }
    +
    switch(cmd){
    case IPC_RMID:
    - freeary(lid);
    + freeary(semid);
    err = 0;
    break;
    case IPC_SET:
    @@ -664,27 +664,28 @@
    ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
    | (tbuf.sem_perm.mode & S_IRWXUGO);
    sma->sem_ctime = CURRENT_TIME;
    + sem_unlock(semid);
    err = 0;
    break;
    default:
    + sem_unlock(semid);
    err = -EINVAL;
    + break;
    }
    + return err;

    out_unlock:
    - spin_unlock(&semary[lid].lock);
    + sem_unlock(semid);
    return err;
    }

    asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
    {
    - int lid; /* lock id */
    int err = -EINVAL;

    - if (semid < 0 || semnum < 0 || cmd < 0)
    + if (semid < 0)
    return -EINVAL;

    - lid = semid % SEMMNI;
    -
    switch(cmd) {
    case IPC_INFO:
    case SEM_INFO:
    @@ -692,25 +693,20 @@
    err = semctl_nolock(semid,semnum,cmd,arg);
    return err;
    case GETALL:
    - case IPC_STAT:
    - spin_lock(&semary[lid].lock);
    - err = semctl_locked_unlock(semid,semnum,cmd,arg);
    - return err;
    case GETVAL:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
    + case IPC_STAT:
    case SETVAL:
    - spin_lock(&semary[lid].lock);
    - err= semctl_locked(semid,semnum,cmd,arg);
    - spin_unlock(&semary[lid].lock);
    - return err;
    case SETALL:
    + err = semctl_main(semid,semnum,cmd,arg);
    + return err;
    case IPC_RMID:
    case IPC_SET:
    - down(&sem_lock);
    - err= semctl_down(semid,semnum,cmd,arg);
    - up(&sem_lock);
    + down(&sem_ids.sem);
    + err = semctl_down(semid,semnum,cmd,arg);
    + up(&sem_ids.sem);
    return err;
    default:
    return -EINVAL;
    @@ -734,33 +730,27 @@
    return un->proc_next;
    }

    +/* returns without sem_lock on error! */
    static int alloc_undo(struct semid_ds *sma, struct sem_undo** unp, int semid, int alter)
    {
    - int size;
    + int size, nsems, error;
    struct sem_undo *un;
    - int error,id;
    - id = (unsigned int) semid % SEMMNI;
    - size = sizeof(struct sem_undo) + sizeof(short)*sma->sem_nsems;
    - spin_unlock(&semary[id].lock);
    +
    + nsems = sma->sem_nsems;
    + size = sizeof(struct sem_undo) + sizeof(short)*nsems;
    + sem_unlock(semid);

    un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
    - spin_lock(&semary[id].lock);
    - if (!un) {
    + if (!un)
    return -ENOMEM;
    - }
    - sma = semary[id].s;
    - error = -EIDRM;
    - if (sma == NULL)
    - goto out;
    - if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
    - goto out;
    - if (size != sizeof(struct sem_undo) + sizeof(short)*sma->sem_nsems)
    - goto out;

    - error = -EACCES;
    - if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
    - goto out;
    memset(un, 0, size);
    + error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
    + if(error) {
    + kfree(un);
    + return error;
    + }
    +
    un->semadj = (short *) &un[1];
    un->semid = semid;
    un->proc_next = current->semundo;
    @@ -769,42 +759,42 @@
    sma->undo = un;
    *unp = un;
    return 0;
    -out:
    - kfree(un);
    - return error;
    }

    asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
    {
    - int id, error = -EINVAL;
    + int error = -EINVAL;
    struct semid_ds *sma;
    - struct sembuf sops[SEMOPM], *sop;
    + struct sembuf fast_sops[SEMOPM_FAST];
    + struct sembuf* sops = fast_sops, *sop;
    struct sem_undo *un;
    int undos = 0, decrease = 0, alter = 0;
    struct sem_queue queue;

    if (nsops < 1 || semid < 0)
    return -EINVAL;
    -
    - if (nsops > SEMOPM)
    + if (nsops > sc_semopm)
    return -E2BIG;
    - if (copy_from_user (sops, tsops, nsops * sizeof(*tsops)))
    - return -EFAULT;
    -
    - id = (unsigned int) semid % SEMMNI;
    - spin_lock(&semary[id].lock);
    - sma = semary[id].s;
    - error = -EINVAL;
    - if (sma == NULL)
    - goto out;
    + if(nsops > SEMOPM_FAST) {
    + sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
    + if(sops==NULL)
    + return -ENOMEM;
    + }
    + if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
    + error=-EFAULT;
    + goto out_free;
    + }
    + sma = sem_lock(semid);
    + error=-EINVAL;
    + if(sma==NULL)
    + goto out_free;
    error = -EIDRM;
    - if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI)
    - goto out;
    -
    + if (sem_checkid(sma,semid))
    + goto out_unlock_free;
    error = -EFBIG;
    for (sop = sops; sop < sops + nsops; sop++) {
    if (sop->sem_num >= sma->sem_nsems)
    - goto out;
    + goto out_unlock_free;
    if (sop->sem_flg & SEM_UNDO)
    undos++;
    if (sop->sem_op < 0)
    @@ -816,7 +806,7 @@

    error = -EACCES;
    if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
    - goto out;
    + goto out_unlock_free;
    if (undos) {
    /* Make sure we have an undo structure
    * for this process and this semaphore set.
    @@ -832,8 +822,8 @@
    }
    if (!un) {
    error = alloc_undo(sma,&un,semid,alter);
    - if(error<0)
    - goto out;
    + if(error)
    + goto out_free;
    }
    } else
    un = NULL;
    @@ -852,25 +842,30 @@
    queue.undo = un;
    queue.pid = current->pid;
    queue.alter = decrease;
    - queue.id = id;
    - current->semsleeping = &queue;
    + queue.id = semid;
    if (alter)
    append_to_queue(sma ,&queue);
    else
    prepend_to_queue(sma ,&queue);
    + current->semsleeping = &queue;

    for (;;) {
    + struct semid_ds* tmp;
    queue.status = -EINTR;
    queue.sleeper = current;
    current->state = TASK_INTERRUPTIBLE;
    - spin_unlock(&semary[id].lock);
    + sem_unlock(semid);

    schedule();

    - /* we can lock the semary even if it was
    - * deleted.
    - */
    - spin_lock(&semary[id].lock);
    + tmp = sem_lock(semid);
    + if(tmp==NULL) {
    + if(queue.status != -EIDRM)
    + BUG();
    + current->semsleeping = NULL;
    + error = -EIDRM;
    + goto out_free;
    + }
    /*
    * If queue.status == 1 we where woken up and
    * have to retry else we simply return.
    @@ -890,7 +885,7 @@
    break;
    /* Everything done by update_queue */
    current->semsleeping = NULL;
    - goto out;
    + goto out_unlock_free;
    }
    }
    current->semsleeping = NULL;
    @@ -898,8 +893,11 @@
    update:
    if (alter)
    update_queue (sma);
    -out:
    - spin_unlock(&semary[id].lock);
    +out_unlock_free:
    + sem_unlock(semid);
    +out_free:
    + if(sops != fast_sops)
    + kfree(sops);
    return error;
    }

    @@ -925,34 +923,32 @@
    /* If the current process was sleeping for a semaphore,
    * remove it from the queue.
    */
    - /* semsleeping is part of "current", and it
    - * is never modified by another thread.
    - * No synchronization required.
    - */
    if ((q = current->semsleeping)) {
    - spin_lock(&semary[current->semsleeping->id].lock);
    + int semid = q->id;
    + sma = sem_lock(semid);
    + current->semsleeping = NULL;

    - if (q->prev)
    + if (q->prev) {
    + if(sma==NULL)
    + BUG();
    remove_from_queue(q->sma,q);
    - current->semsleeping = NULL;
    - spin_unlock(&semary[current->semsleeping->id].lock);
    + }
    + if(sma!=NULL)
    + sem_unlock(semid);
    }

    for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
    int semid = u->semid;
    - int lid;
    if(semid == -1)
    continue;
    - lid = semid % SEMMNI;
    - spin_lock(&semary[lid].lock);
    + sma = sem_lock(semid);
    + if (sma == NULL)
    + continue;

    if (u->semid == -1)
    goto next_entry;

    - sma = semary[lid].s;
    - if (sma == NULL)
    - goto next_entry;
    - if (sma->sem_perm.seq != (unsigned int) u->semid / SEMMNI)
    + if (sem_checkid(sma,u->semid))
    goto next_entry;

    /* remove u from the sma->undo list */
    @@ -977,7 +973,7 @@
    /* maybe some queued-up processes were waiting for this */
    update_queue(sma);
    next_entry:
    - spin_unlock(&semary[lid].lock);
    + sem_unlock(semid);
    }
    current->semundo = NULL;
    }
    @@ -990,35 +986,37 @@
    int i, len = 0;

    len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n");
    - down(&sem_lock);
    + down(&sem_ids.sem);

    - for(i = 0; i < SEMMNI; i++)
    - if(semary[i].s != NULL) {
    - spin_lock(&semary[i].lock);
    + for(i = 0; i <= sem_ids.max_id; i++) {
    + struct semid_ds *sma;
    + sma = sem_lock(i);
    + if(sma) {
    len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %10lu %10lu\n",
    - semary[i].s->sem_perm.key,
    - semary[i].s->sem_perm.seq * SEMMNI + i,
    - semary[i].s->sem_perm.mode,
    - semary[i].s->sem_nsems,
    - semary[i].s->sem_perm.uid,
    - semary[i].s->sem_perm.gid,
    - semary[i].s->sem_perm.cuid,
    - semary[i].s->sem_perm.cgid,
    - semary[i].s->sem_otime,
    - semary[i].s->sem_ctime);
    - spin_unlock(&semary[i].lock);
    + sma->sem_perm.key,
    + sem_buildid(i,sma->sem_perm.seq),
    + sma->sem_perm.mode,
    + sma->sem_nsems,
    + sma->sem_perm.uid,
    + sma->sem_perm.gid,
    + sma->sem_perm.cuid,
    + sma->sem_perm.cgid,
    + sma->sem_otime,
    + sma->sem_ctime);
    + sem_unlock(i);

    pos += len;
    if(pos < offset) {
    len = 0;
    - begin = pos;
    + begin = pos;
    }
    if(pos > offset + length)
    goto done;
    }
    + }
    *eof = 1;
    done:
    - up(&sem_lock);
    + up(&sem_ids.sem);
    *start = buffer + (offset - begin);
    len -= (offset - begin);
    if(len > length)
    --- 2.3/ipc/util.c Sun Nov 7 10:43:28 1999
    +++ build-2.3/ipc/util.c Sun Nov 7 10:56:17 1999
    @@ -6,6 +6,8 @@
    * get BSD style process accounting right.
    * Occurs in several places in the IPC code.
    * Chris Evans, <chris@ferret.lmh.ox.ac.uk>
    + * Nov 1999 - ipc helper functions, unified SMP locking
    + * Manfred Spraul <manfreds@colorfullife.com>
    */

    #include <linux/config.h>
    @@ -13,12 +15,13 @@
    #include <linux/shm.h>
    #include <linux/init.h>
    #include <linux/msg.h>
    -
    -#include "util.h"
    +#include <linux/smp_lock.h>
    +#include <linux/vmalloc.h>
    +#include <linux/malloc.h>

    #if defined(CONFIG_SYSVIPC)

    -extern void sem_init (void), msg_init (void), shm_init (void);
    +#include "util.h"

    void __init ipc_init (void)
    {
    @@ -26,6 +29,157 @@
    msg_init();
    shm_init();
    return;
    +}
    +
    +void __init ipc_init_ids(struct ipc_ids* ids, int size)
    +{
    + int i;
    + sema_init(&ids->sem,1);
    + ids->size = size;
    + if(size == 0)
    + return;
    + if(size > IPCMNI)
    + size = IPCMNI;
    +
    + ids->in_use = 0;
    + ids->max_id = -1;
    + ids->seq = 0;
    + {
    + int seq_limit = INT_MAX/SEQ_MULTIPLIER;
    + if(seq_limit > USHRT_MAX)
    + ids->seq_max = USHRT_MAX;
    + else
    + ids->seq_max = seq_limit;
    + }
    +
    + ids->entries = ipc_alloc(sizeof(struct ipc_id)*size);
    +
    + if(ids->entries == NULL) {
    + printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
    + ids->size = 0;
    + }
    + ids->ary = SPIN_LOCK_UNLOCKED;
    + for(i=0;i<size;i++) {
    + ids->entries[i].p = NULL;
    + }
    +}
    +
    +int ipc_findkey(struct ipc_ids* ids, key_t key)
    +{
    + int id;
    + struct ipc_perm* p;
    +
    + for (id = 0; id <= ids->max_id; id++) {
    + p = ids->entries[id].p;
    + if(p==NULL)
    + continue;
    + if (key == p->key)
    + return id;
    + }
    + return -1;
    +}
    +
    +static int grow_ary(struct ipc_ids* ids, int newsize)
    +{
    + struct ipc_id* new;
    + struct ipc_id* old;
    + int i;
    +
    + if(newsize > IPCMNI)
    + newsize = IPCMNI;
    + if(newsize <= ids->size)
    + return newsize;
    +
    + new = ipc_alloc(sizeof(struct ipc_id)*newsize);
    + if(new == NULL)
    + return ids->size;
    + memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size);
    + for(i=ids->size;i<newsize;i++) {
    + new[i].p = NULL;
    + }
    + spin_lock(&ids->ary);
    +
    + old = ids->entries;
    + ids->entries = new;
    + i = ids->size;
    + ids->size = newsize;
    + spin_unlock(&ids->ary);
    + ipc_free(old, sizeof(struct ipc_id)*i);
    + return ids->size;
    +}
    +
    +int ipc_addid(struct ipc_ids* ids, struct ipc_perm* new, int size)
    +{
    + int id;
    +
    + size = grow_ary(ids,size);
    + for (id = 0; id < size; id++) {
    + if(ids->entries[id].p == NULL)
    + goto found;
    + }
    + return -1;
    +found:
    + ids->in_use++;
    + if (id > ids->max_id)
    + ids->max_id = id;
    +
    + new->cuid = new->uid = current->euid;
    + new->gid = new->cgid = current->egid;
    +
    + new->seq = ids->seq++;
    + if(ids->seq > ids->seq_max)
    + ids->seq = 0;
    +
    + ipc_lock(ids,id);
    + ids->entries[id].p = new;
    + return id;
    +}
    +
    +struct ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
    +{
    + struct ipc_perm* p;
    + int lid = id % SEQ_MULTIPLIER;
    + if(lid > ids->size)
    + BUG();
    + p = ids->entries[lid].p;
    + ids->entries[lid].p = NULL;
    + if(p==NULL)
    + BUG();
    + ids->in_use--;
    +
    + if (lid == ids->max_id) {
    + do {
    + lid--;
    + if(lid == -1)
    + break;
    + } while (ids->entries[lid].p == NULL);
    + ids->max_id = lid;
    + }
    + return p;
    +}
    +
    +void* ipc_alloc(int size)
    +{
    + void* out;
    + if(size > PAGE_SIZE) {
    + lock_kernel();
    + out = vmalloc(size);
    + unlock_kernel();
    + } else {
    + out = kmalloc(size, GFP_KERNEL);
    + }
    + return out;
    +}
    +
    +void ipc_free(void* ptr, int size)
    +{
    + if(size > PAGE_SIZE) {
    + lock_kernel();
    + vfree(ptr);
    + unlock_kernel();
    + } else {
    + kfree(ptr);
    + }
    }

    /*
    --- 2.3/ipc/util.h Sun Nov 7 10:43:28 1999
    +++ build-2.3/ipc/util.h Sun Nov 7 10:50:11 1999
    @@ -1,12 +1,78 @@
    /*
    * linux/ipc/util.h
    * Copyright (C) 1999 Christoph Rohland
    + *
    + * ipc helper functions (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
    */

    -/*
    - * IPCMNI is the absolute maximum for ipc identifier. This is used to
    - * detect stale identifiers
    +#define USHRT_MAX 0xffff
    +#define SEQ_MULTIPLIER (IPCMNI)
    +
    +void sem_init (void);
    +void msg_init (void);
    +void shm_init (void);
    +
    +struct ipc_ids {
    + int size;
    + int in_use;
    + int max_id;
    + unsigned short seq;
    + unsigned short seq_max;
    + struct semaphore sem;
    + spinlock_t ary;
    + struct ipc_id* entries;
    +};
    +
    +struct ipc_id {
    + struct ipc_perm* p;
    +};
    +
    +
    +void __init ipc_init_ids(struct ipc_ids* ids, int size);
    +
    +/* must be called with ids->sem acquired.*/
    +int ipc_findkey(struct ipc_ids* ids, key_t key);
    +int ipc_addid(struct ipc_ids* ids, struct ipc_perm* new, int size);
    +
    +/* must be called with both locks acquired. */
    +struct ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);
    +
    +int ipcperms (struct ipc_perm *ipcp, short flg);
    +
    +/* for rare, potentially huge allocations.
    + * both functions can sleep
    */
    -#define IPCMNI (1<<15)
    +void* ipc_alloc(int size);
    +void ipc_free(void* ptr, int size);
    +
    +extern inline struct ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
    +{
    + struct ipc_perm* out;
    + int lid = id % SEQ_MULTIPLIER;
    + if(lid > ids->size)
    + return NULL;
    +
    + spin_lock(&ids->ary);
    + out = ids->entries[lid].p;
    + if(out==NULL)
    + spin_unlock(&ids->ary);
    + return out;
    +}
    +
    +extern inline void ipc_unlock(struct ipc_ids* ids, int id)
    +{
    + spin_unlock(&ids->ary);
    +}
    +
    +extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)
    +{
    + return SEQ_MULTIPLIER*seq + id;
    +}
    +
    +extern inline int ipc_checkid(struct ipc_ids* ids, struct ipc_perm* ipcp, int uid)
    +{
    + if(uid/SEQ_MULTIPLIER != ipcp->seq)
    + return 1;
    + return 0;
    +}

    -extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
    --- 2.3/ipc/msg.c Sun Nov 7 10:43:28 1999
    +++ build-2.3/ipc/msg.c Sun Nov 7 12:50:15 1999
    @@ -11,6 +11,7 @@
    * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
    *
    * mostly rewritten, threaded and wake-one semantics added
    + * MSGMAX limit removed, sysctl's added
    * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
    */

    @@ -21,11 +22,15 @@
    #include <linux/init.h>
    #include <linux/proc_fs.h>
    #include <linux/list.h>
    -
    #include <asm/uaccess.h>
    +#include "util.h"
    +
    +/* sysctl: */
    +int msg_ctlmax = MSGMAX;
    +int msg_ctlmnb = MSGMNB;
    +int msg_ctlmni = MSGMNI;

    -#define USHRT_MAX 0xffff
    -/* one ms_receiver structure for each sleeping receiver */
    +/* one msg_receiver structure for each sleeping receiver */
    struct msg_receiver {
    struct list_head r_list;
    struct task_struct* r_tsk;
    @@ -37,14 +42,27 @@
    struct msg_msg* volatile r_msg;
    };

    +/* one msg_sender for each sleeping sender */
    +struct msg_sender {
    + struct list_head list;
    + struct task_struct* tsk;
    +};
    +
    +struct msg_msgseg {
    + struct msg_msgseg* next;
    + /* the next part of the message follows immediately */
    +};
    /* one msg_msg structure for each message */
    struct msg_msg {
    struct list_head m_list;
    long m_type;
    int m_ts; /* message text size */
    + struct msg_msgseg* next;
    /* the actual message follows immediately */
    };

    +#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg))
    +#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg))

    /* one msq_queue structure for each present queue on the system */
    struct msg_queue {
    @@ -60,13 +78,7 @@

    struct list_head q_messages;
    struct list_head q_receivers;
    - wait_queue_head_t q_rwait;
    -};
    -
    -/* one msq_array structure for each possible queue on the system */
    -struct msg_array {
    - spinlock_t lock;
    - struct msg_queue* q;
    + struct list_head q_senders;
    };

    #define SEARCH_ANY 1
    @@ -74,99 +86,181 @@
    #define SEARCH_NOTEQUAL 3
    #define SEARCH_LESSEQUAL 4

    -static DECLARE_MUTEX(msg_lock);
    -static struct msg_array msg_que[MSGMNI];
    -
    -static unsigned short msg_seq = 0;
    -static int msg_used_queues = 0;
    -static int msg_max_id = -1;
    -
    static atomic_t msg_bytes = ATOMIC_INIT(0);
    static atomic_t msg_hdrs = ATOMIC_INIT(0);

    +static struct ipc_ids msg_ids;
    +
    +#define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id))
    +#define msg_unlock(id) ipc_unlock(&msg_ids,id)
    +#define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id))
    +#define msg_checkid(msq, msgid) \
    + ipc_checkid(&msg_ids,&msq->q_perm,msgid)
    +#define msg_buildid(id, seq) \
    + ipc_buildid(&msg_ids, id, seq)
    +
    static void freeque (int id);
    static int newque (key_t key, int msgflg);
    -static int findkey (key_t key);
    #ifdef CONFIG_PROC_FS
    static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
    #endif

    -/* implemented in ipc/util.c, thread-safe */
    -extern int ipcperms (struct ipc_perm *ipcp, short msgflg);
    -
    void __init msg_init (void)
    {
    - int id;
    + ipc_init_ids(&msg_ids,msg_ctlmni);

    - for (id = 0; id < MSGMNI; id++) {
    - msg_que[id].lock = SPIN_LOCK_UNLOCKED;
    - msg_que[id].q = NULL;
    - }
    #ifdef CONFIG_PROC_FS
    create_proc_read_entry("sysvipc/msg", 0, 0, sysvipc_msg_read_proc, NULL);
    #endif
    }

    -static int findkey (key_t key)
    -{
    - int id;
    - struct msg_queue *msq;
    -
    - for (id = 0; id <= msg_max_id; id++) {
    - msq = msg_que[id].q;
    - if(msq == NULL)
    - continue;
    - if (key == msq->q_perm.key)
    - return id;
    - }
    - return -1;
    -}
    -
    static int newque (key_t key, int msgflg)
    {
    int id;
    struct msg_queue *msq;
    - struct ipc_perm *ipcp;
    -
    - for (id = 0; id < MSGMNI; id++) {
    - if (msg_que[id].q == NULL)
    - break;
    - }
    - if(id == MSGMNI)
    - return -ENOSPC;

    msq = (struct msg_queue *) kmalloc (sizeof (*msq), GFP_KERNEL);
    if (!msq)
    return -ENOMEM;
    -
    - ipcp = &msq->q_perm;
    - ipcp->mode = (msgflg & S_IRWXUGO);
    - ipcp->key = key;
    - ipcp->cuid = ipcp->uid = current->euid;
    - ipcp->gid = ipcp->cgid = current->egid;
    -
    - /* ipcp->seq*MSGMNI must be a positive integer.
    - * this limits MSGMNI to 32768
    - */
    - ipcp->seq = msg_seq++;
    + id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
    + if(id == -1) {
    + kfree(msq);
    + return -ENOSPC;
    + }
    + msq->q_perm.mode = (msgflg & S_IRWXUGO);
    + msq->q_perm.key = key;

    msq->q_stime = msq->q_rtime = 0;
    msq->q_ctime = CURRENT_TIME;
    msq->q_cbytes = msq->q_qnum = 0;
    - msq->q_qbytes = MSGMNB;
    + msq->q_qbytes = msg_ctlmnb;
    msq->q_lspid = msq->q_lrpid = 0;
    INIT_LIST_HEAD(&msq->q_messages);
    INIT_LIST_HEAD(&msq->q_receivers);
    - init_waitqueue_head(&msq->q_rwait);
    + INIT_LIST_HEAD(&msq->q_senders);
    + msg_unlock(id);
    +
    + return msg_buildid(id,msq->q_perm.seq);
    +}
    +
    +static void free_msg(struct msg_msg* msg)
    +{
    + struct msg_msgseg* seg;
    + seg = msg->next;
    + kfree(msg);
    + while(seg != NULL) {
    + struct msg_msgseg* tmp = seg->next;
    + kfree(seg);
    + seg = tmp;
    + }
    +}
    +
    +static struct msg_msg* load_msg(void* src, int len)
    +{
    + struct msg_msg* msg;
    + struct msg_msgseg** pseg;
    + int err;
    + int alen;
    +
    + alen = len;
    + if(alen > DATALEN_MSG)
    + alen = DATALEN_MSG;
    +
    + msg = (struct msg_msg *) kmalloc (sizeof(*msg) + alen, GFP_KERNEL);
    + if(msg==NULL)
    + return ERR_PTR(-ENOMEM);
    +
    + msg->next = NULL;
    +
    + if (copy_from_user(msg+1, src, alen)) {
    + err = -EFAULT;
    + goto out_err;
    + }
    +
    + len -= alen;
    + src = ((char*)src)+alen;
    + pseg = &msg->next;
    + while(len > 0) {
    + struct msg_msgseg* seg;
    + alen = len;
    + if(alen > DATALEN_SEG)
    + alen = DATALEN_SEG;
    + seg = (struct msg_msgseg *) kmalloc (sizeof(*seg) + alen, GFP_KERNEL);
    + if(seg==NULL) {
    + err=-ENOMEM;
    + goto out_err;
    + }
    + *pseg = seg;
    + seg->next = NULL;
    + if(copy_from_user (seg+1, src, alen)) {
    + err = -EFAULT;
    + goto out_err;
    + }
    + pseg = &seg->next;
    + len -= alen;
    + src = ((char*)src)+alen;
    + }
    + return msg;
    +
    +out_err:
    + free_msg(msg);
    + return ERR_PTR(err);
    +}
    +
    +static int store_msg(void* dest, struct msg_msg* msg, int len)
    +{
    + int alen;
    + struct msg_msgseg *seg;
    +
    + alen = len;
    + if(alen > DATALEN_MSG)
    + alen = DATALEN_MSG;
    + if(copy_to_user (dest, msg+1, alen))
    + return -1;
    +
    + len -= alen;
    + dest = ((char*)dest)+alen;
    + seg = msg->next;
    + while(len > 0) {
    + alen = len;
    + if(alen > DATALEN_SEG)
    + alen = DATALEN_SEG;
    + if(copy_to_user (dest, seg+1, alen))
    + return -1;
    + len -= alen;
    + dest = ((char*)dest)+alen;
    + seg=seg->next;
    + }
    + return 0;
    +}
    +
    +static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
    +{
    + mss->tsk=current;
    + current->state=TASK_INTERRUPTIBLE;
    + list_add_tail(&mss->list,&msq->q_senders);
    +}
    +
    +static inline void ss_del(struct msg_sender* mss)
    +{
    + if(mss->list.next != NULL)
    + list_del(&mss->list);
    +}

    - if (id > msg_max_id)
    - msg_max_id = id;
    - spin_lock(&msg_que[id].lock);
    - msg_que[id].q = msq;
    - spin_unlock(&msg_que[id].lock);
    - msg_used_queues++;
    +static void ss_wakeup(struct list_head* h, int kill)
    +{
    + struct list_head *tmp;

    - return (int)msq->q_perm.seq * MSGMNI + id;
    + tmp = h->next;
    + while (tmp != h) {
    + struct msg_sender* mss;
    +
    + mss = list_entry(tmp,struct msg_sender,list);
    + tmp = tmp->next;
    + if(kill)
    + mss->list.next=NULL;
    + wake_up_process(mss->tsk);
    + }
    }

    static void expunge_all(struct msg_queue* msq, int res)
    @@ -189,48 +283,32 @@
    struct msg_queue *msq;
    struct list_head *tmp;

    - msq=msg_que[id].q;
    - msg_que[id].q = NULL;
    - if (id == msg_max_id) {
    - while ((msg_que[msg_max_id].q == NULL)) {
    - if(msg_max_id--== 0)
    - break;
    - }
    - }
    - msg_used_queues--;
    + msq = msg_rmid(id);

    expunge_all(msq,-EIDRM);
    -
    - while(waitqueue_active(&msq->q_rwait)) {
    - wake_up(&msq->q_rwait);
    - spin_unlock(&msg_que[id].lock);
    - current->policy |= SCHED_YIELD;
    - schedule();
    - spin_lock(&msg_que[id].lock);
    - }
    - spin_unlock(&msg_que[id].lock);
    + ss_wakeup(&msq->q_senders,1);
    + msg_unlock(id);

    tmp = msq->q_messages.next;
    while(tmp != &msq->q_messages) {
    struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list);
    tmp = tmp->next;
    atomic_dec(&msg_hdrs);
    - kfree(msg);
    + free_msg(msg);
    }
    atomic_sub(msq->q_cbytes, &msg_bytes);
    kfree(msq);
    }

    -
    asmlinkage long sys_msgget (key_t key, int msgflg)
    {
    int id, ret = -EPERM;
    struct msg_queue *msq;

    - down(&msg_lock);
    + down(&msg_ids.sem);
    if (key == IPC_PRIVATE)
    ret = newque(key, msgflg);
    - else if ((id = findkey (key)) == -1) { /* key not used */
    + else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
    if (!(msgflg & IPC_CREAT))
    ret = -ENOENT;
    else
    @@ -238,55 +316,62 @@
    } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
    ret = -EEXIST;
    } else {
    - msq = msg_que[id].q;
    + msq = msg_lock(id);
    + if(msq==NULL)
    + BUG();
    if (ipcperms(&msq->q_perm, msgflg))
    ret = -EACCES;
    else
    - ret = (unsigned int) msq->q_perm.seq * MSGMNI + id;
    + ret = msg_buildid(id, msq->q_perm.seq);
    + msg_unlock(id);
    }
    - up(&msg_lock);
    + up(&msg_ids.sem);
    return ret;
    }

    asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
    {
    - int id, err;
    + int err;
    struct msg_queue *msq;
    struct msqid_ds tbuf;
    struct ipc_perm *ipcp;

    if (msqid < 0 || cmd < 0)
    return -EINVAL;
    - id = msqid % MSGMNI;
    +
    switch (cmd) {
    case IPC_INFO:
    case MSG_INFO:
    {
    struct msginfo msginfo;
    + int max_id;
    if (!buf)
    return -EFAULT;
    /* We must not return kernel stack data.
    - * due to variable alignment, it's not enough
    + * due to padding, it's not enough
    * to set all member fields.
    */
    memset(&msginfo,0,sizeof(msginfo));
    - msginfo.msgmni = MSGMNI;
    - msginfo.msgmax = MSGMAX;
    - msginfo.msgmnb = MSGMNB;
    - msginfo.msgmap = MSGMAP;
    - msginfo.msgpool = MSGPOOL;
    - msginfo.msgtql = MSGTQL;
    + msginfo.msgmni = msg_ctlmni;
    + msginfo.msgmax = msg_ctlmax;
    + msginfo.msgmnb = msg_ctlmnb;
    msginfo.msgssz = MSGSSZ;
    msginfo.msgseg = MSGSEG;
    + down(&msg_ids.sem);
    if (cmd == MSG_INFO) {
    - msginfo.msgpool = msg_used_queues;
    + msginfo.msgpool = msg_ids.in_use;
    msginfo.msgmap = atomic_read(&msg_hdrs);
    msginfo.msgtql = atomic_read(&msg_bytes);
    + } else {
    + msginfo.msgmap = MSGMAP;
    + msginfo.msgpool = MSGPOOL;
    + msginfo.msgtql = MSGTQL;
    }
    -
    + max_id = msg_ids.max_id;
    + up(&msg_ids.sem);
    if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
    return -EFAULT;
    - return (msg_max_id < 0) ? 0: msg_max_id;
    + return (max_id < 0) ? 0: max_id;
    }
    case MSG_STAT:
    case IPC_STAT:
    @@ -294,19 +379,18 @@
    int success_return;
    if (!buf)
    return -EFAULT;
    - if(cmd == MSG_STAT && msqid > MSGMNI)
    + if(cmd == MSG_STAT && msqid > msg_ids.size)
    return -EINVAL;

    - spin_lock(&msg_que[id].lock);
    - msq = msg_que[id].q;
    - err = -EINVAL;
    + msq = msg_lock(msqid);
    if (msq == NULL)
    - goto out_unlock;
    + return -EINVAL;
    +
    if(cmd == MSG_STAT) {
    - success_return = (unsigned int) msq->q_perm.seq * MSGMNI + msqid;
    + success_return = msg_buildid(msqid, msq->q_perm.seq);
    } else {
    err = -EIDRM;
    - if (msq->q_perm.seq != (unsigned int) msqid / MSGMNI)
    + if (msg_checkid(msq,msqid))
    goto out_unlock;
    success_return = 0;
    }
    @@ -339,7 +423,7 @@

    tbuf.msg_lspid = msq->q_lspid;
    tbuf.msg_lrpid = msq->q_lrpid;
    - spin_unlock(&msg_que[id].lock);
    + msg_unlock(msqid);
    if (copy_to_user (buf, &tbuf, sizeof(*buf)))
    return -EFAULT;
    return success_return;
    @@ -356,32 +440,31 @@
    return -EINVAL;
    }

    - down(&msg_lock);
    - spin_lock(&msg_que[id].lock);
    - msq = msg_que[id].q;
    - err = -EINVAL;
    + down(&msg_ids.sem);
    + msq = msg_lock(msqid);
    + err=-EINVAL;
    if (msq == NULL)
    - goto out_unlock_up;
    + goto out_up;
    +
    err = -EIDRM;
    - if (msq->q_perm.seq != (unsigned int) msqid / MSGMNI)
    + if (msg_checkid(msq,msqid))
    goto out_unlock_up;
    ipcp = &msq->q_perm;
    + err = -EPERM;
    + if (current->euid != ipcp->cuid &&
    + current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
    + /* We _could_ check for CAP_CHOWN above, but we don't */
    + goto out_unlock_up;

    switch (cmd) {
    case IPC_SET:
    {
    int newqbytes;
    - err = -EPERM;
    - if (current->euid != ipcp->cuid &&
    - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
    - /* We _could_ check for CAP_CHOWN above, but we don't */
    - goto out_unlock_up;
    -
    if(tbuf.msg_qbytes == 0)
    newqbytes = tbuf.msg_lqbytes;
    else
    newqbytes = tbuf.msg_qbytes;
    - if (newqbytes > MSGMNB && !capable(CAP_SYS_RESOURCE))
    + if (newqbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
    goto out_unlock_up;
    msq->q_qbytes = newqbytes;

    @@ -397,27 +480,23 @@
    /* sleeping senders might be able to send
    * due to a larger queue size.
    */
    - wake_up(&msq->q_rwait);
    - spin_unlock(&msg_que[id].lock);
    + ss_wakeup(&msq->q_senders,0);
    + msg_unlock(msqid);
    break;
    }
    case IPC_RMID:
    - err = -EPERM;
    - if (current->euid != ipcp->cuid &&
    - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
    - goto out_unlock;
    - freeque (id);
    + freeque (msqid);
    break;
    }
    err = 0;
    out_up:
    - up(&msg_lock);
    + up(&msg_ids.sem);
    return err;
    out_unlock_up:
    - spin_unlock(&msg_que[id].lock);
    + msg_unlock(msqid);
    goto out_up;
    out_unlock:
    - spin_unlock(&msg_que[id].lock);
    + msg_unlock(msqid);
    return err;
    }

    @@ -471,67 +550,61 @@

    asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
    {
    - int id;
    struct msg_queue *msq;
    struct msg_msg *msg;
    long mtype;
    int err;

    - if (msgsz > MSGMAX || (long) msgsz < 0 || msqid < 0)
    + if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
    return -EINVAL;
    if (get_user(mtype, &msgp->mtype))
    return -EFAULT;
    if (mtype < 1)
    return -EINVAL;

    - msg = (struct msg_msg *) kmalloc (sizeof(*msg) + msgsz, GFP_KERNEL);
    - if(msg==NULL)
    - return -ENOMEM;
    + msg = load_msg(msgp->mtext, msgsz);
    + if(IS_ERR(msg))
    + return PTR_ERR(msg);

    - if (copy_from_user(msg+1, msgp->mtext, msgsz)) {
    - kfree(msg);
    - return -EFAULT;
    - }
    msg->m_type = mtype;
    msg->m_ts = msgsz;

    - id = (unsigned int) msqid % MSGMNI;
    - spin_lock(&msg_que[id].lock);
    - err= -EINVAL;
    -retry:
    - msq = msg_que[id].q;
    - if (msq == NULL)
    + msq = msg_lock(msqid);
    + err=-EINVAL;
    + if(msq==NULL)
    goto out_free;
    -
    +retry:
    err= -EIDRM;
    - if (msq->q_perm.seq != (unsigned int) msqid / MSGMNI)
    - goto out_free;
    + if (msg_checkid(msq,msqid))
    + goto out_unlock_free;

    err=-EACCES;
    if (ipcperms(&msq->q_perm, S_IWUGO))
    - goto out_free;
    + goto out_unlock_free;

    - if(msgsz + msq->q_cbytes > msq->q_qbytes) {
    - DECLARE_WAITQUEUE(wait,current);
    + if(msgsz + msq->q_cbytes > msq->q_qbytes ||
    + 1 + msq->q_qnum > msq->q_qbytes) {
    + struct msg_sender s;

    if(msgflg&IPC_NOWAIT) {
    err=-EAGAIN;
    - goto out_free;
    + goto out_unlock_free;
    }
    - current->state = TASK_INTERRUPTIBLE;
    - add_wait_queue(&msq->q_rwait,&wait);
    - spin_unlock(&msg_que[id].lock);
    + ss_add(msq, &s);
    + msg_unlock(msqid);
    schedule();
    current->state= TASK_RUNNING;
    +
    + msq = msg_lock(msqid);
    + err = -EIDRM;
    + if(msq==NULL)
    + goto out_free;
    + ss_del(&s);

    - remove_wait_queue(&msq->q_rwait,&wait);
    if (signal_pending(current)) {
    - kfree(msg);
    - return -EINTR;
    + err=-EINTR;
    + goto out_unlock_free;
    }
    -
    - spin_lock(&msg_que[id].lock);
    - err = -EIDRM;
    goto retry;
    }

    @@ -549,10 +622,11 @@
    msq->q_lspid = current->pid;
    msq->q_stime = CURRENT_TIME;

    +out_unlock_free:
    + msg_unlock(msqid);
    out_free:
    if(msg!=NULL)
    - kfree(msg);
    - spin_unlock(&msg_que[id].lock);
    + free_msg(msg);
    return err;
    }

    @@ -582,7 +656,6 @@
    struct msg_receiver msr_d;
    struct list_head* tmp;
    struct msg_msg* msg, *found_msg;
    - int id;
    int err;
    int mode;

    @@ -590,13 +663,10 @@
    return -EINVAL;
    mode = convert_mode(&msgtyp,msgflg);

    - id = (unsigned int) msqid % MSGMNI;
    - spin_lock(&msg_que[id].lock);
    + msq = msg_lock(msqid);
    + if(msq==NULL)
    + return -EINVAL;
    retry:
    - msq = msg_que[id].q;
    - err=-EINVAL;
    - if (msq == NULL)
    - goto out_unlock;
    err=-EACCES;
    if (ipcperms (&msq->q_perm, S_IRUGO))
    goto out_unlock;
    @@ -630,21 +700,19 @@
    msq->q_cbytes -= msg->m_ts;
    atomic_sub(msg->m_ts,&msg_bytes);
    atomic_dec(&msg_hdrs);
    - if(waitqueue_active(&msq->q_rwait))
    - wake_up(&msq->q_rwait);
    -out_success_unlock:
    - spin_unlock(&msg_que[id].lock);
    + ss_wakeup(&msq->q_senders,0);
    + msg_unlock(msqid);
    out_success:
    msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
    if (put_user (msg->m_type, &msgp->mtype) ||
    - copy_to_user (msgp->mtext, msg+1, msgsz))
    - {
    + store_msg(msgp->mtext, msg, msgsz)) {
    msgsz = -EFAULT;
    }
    - kfree(msg);
    + free_msg(msg);
    return msgsz;
    } else
    {
    + struct msg_queue *t;
    /* no message waiting. Prepare for pipelined
    * receive.
    */
    @@ -657,12 +725,13 @@
    msr_d.r_msgtype = msgtyp;
    msr_d.r_mode = mode;
    if(msgflg & MSG_NOERROR)
    - msr_d.r_maxsize = MSGMAX;
    + msr_d.r_maxsize = INT_MAX;
    else
    msr_d.r_maxsize = msgsz;
    msr_d.r_msg = ERR_PTR(-EAGAIN);
    current->state = TASK_INTERRUPTIBLE;
    - spin_unlock(&msg_que[id].lock);
    + msg_unlock(msqid);
    +
    schedule();
    current->state = TASK_RUNNING;

    @@ -670,16 +739,22 @@
    if(!IS_ERR(msg))
    goto out_success;

    - spin_lock(&msg_que[id].lock);
    + t = msg_lock(msqid);
    + if(t==NULL)
    + msqid=-1;
    msg = (struct msg_msg*)msr_d.r_msg;
    if(!IS_ERR(msg)) {
    /* our message arived while we waited for
    * the spinlock. Process it.
    */
    - goto out_success_unlock;
    + if(msqid!=-1)
    + msg_unlock(msqid);
    + goto out_success;
    }
    err = PTR_ERR(msg);
    if(err == -EAGAIN) {
    + if(msqid==-1)
    + BUG();
    list_del(&msr_d.r_list);
    if (signal_pending(current))
    err=-EINTR;
    @@ -688,7 +763,8 @@
    }
    }
    out_unlock:
    - spin_unlock(&msg_que[id].lock);
    + if(msqid!=-1)
    + msg_unlock(msqid);
    return err;
    }

    @@ -699,28 +775,29 @@
    off_t begin = 0;
    int i, len = 0;

    - down(&msg_lock);
    + down(&msg_ids.sem);
    len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");

    - for(i = 0; i <= msg_max_id; i++) {
    - spin_lock(&msg_que[i].lock);
    - if(msg_que[i].q != NULL) {
    + for(i = 0; i <= msg_ids.max_id; i++) {
    + struct msg_queue * msq;
    + msq = msg_lock(i);
    + if(msq != NULL) {
    len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
    - msg_que[i].q->q_perm.key,
    - msg_que[i].q->q_perm.seq * MSGMNI + i,
    - msg_que[i].q->q_perm.mode,
    - msg_que[i].q->q_cbytes,
    - msg_que[i].q->q_qnum,
    - msg_que[i].q->q_lspid,
    - msg_que[i].q->q_lrpid,
    - msg_que[i].q->q_perm.uid,
    - msg_que[i].q->q_perm.gid,
    - msg_que[i].q->q_perm.cuid,
    - msg_que[i].q->q_perm.cgid,
    - msg_que[i].q->q_stime,
    - msg_que[i].q->q_rtime,
    - msg_que[i].q->q_ctime);
    - spin_unlock(&msg_que[i].lock);
    + msq->q_perm.key,
    + msg_buildid(i,msq->q_perm.seq),
    + msq->q_perm.mode,
    + msq->q_cbytes,
    + msq->q_qnum,
    + msq->q_lspid,
    + msq->q_lrpid,
    + msq->q_perm.uid,
    + msq->q_perm.gid,
    + msq->q_perm.cuid,
    + msq->q_perm.cgid,
    + msq->q_stime,
    + msq->q_rtime,
    + msq->q_ctime);
    + msg_unlock(i);

    pos += len;
    if(pos < offset) {
    @@ -729,13 +806,12 @@
    }
    if(pos > offset + length)
    goto done;
    - } else {
    - spin_unlock(&msg_que[i].lock);
    }
    +
    }
    *eof = 1;
    done:
    - up(&msg_lock);
    + up(&msg_ids.sem);
    *start = buffer + (offset - begin);
    len -= (offset - begin);
    if(len > length)
    @@ -745,4 +821,3 @@
    return len;
    }
    #endif
    -
    --- 2.3/include/linux/sem.h Sun Nov 7 10:43:28 1999
    +++ build-2.3/include/linux/sem.h Sun Nov 7 10:21:49 1999
    @@ -60,10 +60,10 @@
    int semaem;
    };

    -#define SEMMNI 128 /* <= 32767 max # of semaphore identifiers */
    -#define SEMMSL 250 /* <= 512 max num of semaphores per id */
    -#define SEMMNS (SEMMNI*SEMMSL) /* <= MAX_INT max # of semaphores in system */
    -#define SEMOPM 32 /* <= 160 max num of ops per semop call */
    +#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */
    +#define SEMMSL 250 /* <= 60 000 max num of semaphores per id */
    +#define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */
    +#define SEMOPM 32 /* <= 20 000 max num of ops per semop call */
    #define SEMVMX 32767 /* <= 32767 semaphore maximum value */

    /* unused */
    --- 2.3/include/linux/msg.h Sun Nov 7 10:43:28 1999
    +++ build-2.3/include/linux/msg.h Sun Nov 7 10:21:49 1999
    @@ -45,9 +45,9 @@
    unsigned short msgseg;
    };

    -#define MSGMNI 128 /* <= 32768 */ /* max # of msg queue identifiers */
    -#define MSGMAX 4056 /* <= 4056 (?)*/ /* max size of message (bytes) */
    -#define MSGMNB 16384 /* <= MAX_INT */ /* default max size of a message queue */
    +#define MSGMNI 128 /* <= IPCMNI */ /* max # of msg queue identifiers */
    +#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */
    +#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */

    /* unused */
    #define MSGPOOL (MSGMNI*MSGMNB/1024) /* size in kilobytes of message pool */
    --- 2.3/include/linux/ipc.h Sun Nov 7 10:43:28 1999
    +++ build-2.3/include/linux/ipc.h Sun Nov 7 10:21:49 1999
    @@ -42,6 +42,8 @@
    #define IPC_UNUSED ((void *) -1)
    #define IPC_NOID ((void *) -2) /* being allocated/destroyed */

    +#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
    +
    #endif /* __KERNEL__ */

    #endif /* _LINUX_IPC_H */
    --- 2.3/include/linux/sysctl.h Sun Nov 7 10:43:28 1999
    +++ build-2.3/include/linux/sysctl.h Sun Nov 7 10:21:49 1999
    @@ -103,7 +103,9 @@
    KERN_MSGPOOL=37, /* int: Maximum system message pool size */
    KERN_SYSRQ=38, /* int: Sysreq enable */
    KERN_MAX_THREADS=39, /* int: Maximum nr of threads in the system */
    - KERN_RANDOM=40 /* Random driver */
    + KERN_RANDOM=40, /* Random driver */
    + KERN_MSGMNI=41, /* int: msg queue identifiers */
    + KERN_SEM=42 /* int: sysv semaphore limits */
    };


    --- 2.3/kernel/sysctl.c Sun Nov 7 10:43:28 1999
    +++ build-2.3/kernel/sysctl.c Sun Nov 7 10:21:49 1999
    @@ -50,6 +50,10 @@
    #endif
    #ifdef CONFIG_SYSVIPC
    extern size_t shm_prm[];
    +extern int msg_ctlmax;
    +extern int msg_ctlmnb;
    +extern int msg_ctlmni;
    +extern int sem_ctls[];
    #endif

    #ifdef __sparc__
    @@ -215,6 +219,14 @@
    #ifdef CONFIG_SYSVIPC
    {KERN_SHMMAX, "shmmax", &shm_prm, 3*sizeof (size_t),
    0644, NULL, &proc_doulongvec_minmax},
    + {KERN_MSGMAX, "msgmax", &msg_ctlmax, sizeof (int),
    + 0644, NULL, &proc_dointvec},
    + {KERN_MSGMNI, "msgmni", &msg_ctlmni, sizeof (int),
    + 0644, NULL, &proc_dointvec},
    + {KERN_MSGMNB, "msgmnb", &msg_ctlmnb, sizeof (int),
    + 0644, NULL, &proc_dointvec},
    + {KERN_SEM, "sem", &sem_ctls, 4*sizeof (int),
    + 0644, NULL, &proc_dointvec},
    #endif
    #ifdef CONFIG_MAGIC_SYSRQ
    {KERN_SYSRQ, "sysrq", &sysrq_enabled, sizeof (int),





