Date: 23 Feb 1999
Subject: [patches] patch-2.2.2-nonx86, patch-2.2.2-filter

	As promised, here are some patches to let 2.2.2 from the Linus
tree build cleanly on the Alpha platform, plus a kludged fix for the
filter problems. Some of the new timer code (in sched.c) did not jibe
well with my system and caused it to hang while initializing a 2940UW.
Several patches to fix the new filter code are already floating
around, so I made the ksyms.c and sched.c patches separate.
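
	For reference, the filter patch reverts sk_run_filter() to the
older raw-buffer interface. A sketch of the two prototypes, both taken
from the filter.c diff further down (the skb-based one is what pristine
2.2.2 ships with):

/* Pristine 2.2.2: the interpreter sees the whole skb, which is what
 * allows the negative SKF_NET_OFF/SKF_LL_OFF offsets and the ancillary
 * SKF_AD_* loads.
 */
int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);

/* After this patch: raw packet bytes and an explicit length only. */
int sk_run_filter(unsigned char *data, int len,
		  struct sock_filter *filter, int flen);
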
Here's the deal: I am going to start work on a similar axp
patch for 2.2.2ac1. I would like some of the other people with
non-x86 systems (m68k, ppc, whatever) to give patch-2.2.2-nonx86
a whirl on their systems and report back whether it works
for them as well.
	As always, the two included patches have been tested on
my Alpha: 21164a (EV56), egcs 1.1.1 compiler, glibc-2.0.7, etc.
I am running a patched 2.2.2 kernel, and everything is smooth so far.
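
	If you want a quick user-space smoke test of the filter path
after patching, something like the following (hypothetical, not part
of the patches) exercises the SO_ATTACH_FILTER code in the sock.c
diff. The one-instruction "accept the whole packet" program is about
the simplest thing that should get past sk_chk_filter():

/* Hypothetical test snippet: attach a trivial BPF program that
 * returns 0xffff, i.e. "keep up to 0xffff bytes of every packet".
 */
#include <sys/socket.h>
#include <linux/filter.h>

static struct sock_filter insns[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

static int attach_accept_all(int fd)
{
	struct sock_fprog prog = { 1, insns };	/* len, filter */

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}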

Thanks,
Daniel
--
--- pristine/kernel/ksyms.c Tue Feb 23 18:45:45 1999
+++ linux-2.2.2-axp/kernel/ksyms.c Tue Feb 23 20:21:28 1999
@@ -322,8 +322,6 @@
EXPORT_SYMBOL(sprintf);
EXPORT_SYMBOL(vsprintf);
EXPORT_SYMBOL(kdevname);
-EXPORT_SYMBOL(bdevname);
-EXPORT_SYMBOL(cdevname);
EXPORT_SYMBOL(simple_strtoul);
EXPORT_SYMBOL(system_utsname); /* UTS data */
EXPORT_SYMBOL(uts_sem); /* UTS semaphore */
@@ -372,7 +370,6 @@
EXPORT_SYMBOL(event);
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(brw_page);

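	The __down_trylock export goes away above because the function
itself is removed from sched.c below. For anyone tracing the
DOWN_HEAD/DOWN_TAIL hunks there, the restored macros make __down()
expand to roughly the loop sketched here; the cleanup after the loop
is an assumption, since the hunk context cuts off before the end of
DOWN_TAIL:

void __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;		/* DOWN_VAR */
	struct wait_queue wait = { tsk, NULL };

	tsk->state = TASK_UNINTERRUPTIBLE;		/* DOWN_HEAD */
	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		if (waking_non_zero(sem, tsk))	/* are we waking up? */
			break;			/* yes, exit loop */
		schedule();			/* __down() body */
		tsk->state = TASK_UNINTERRUPTIBLE;	/* DOWN_TAIL */
	}
	tsk->state = TASK_RUNNING;	/* assumed; not shown in the hunk */
	remove_wait_queue(&sem->wait, &wait);
}
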
--- pristine/kernel/sched.c Tue Feb 23 18:45:45 1999
+++ linux-2.2.2-axp/kernel/sched.c Tue Feb 23 20:16:35 1999
@@ -36,7 +36,6 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
-#include <asm/semaphore-helper.h>

#include <linux/timex.h>

@@ -464,17 +463,8 @@
unsigned long flags;

spin_lock_irqsave(&timerlist_lock, flags);
- if (timer->prev)
- goto bug;
internal_add_timer(timer);
-out:
spin_unlock_irqrestore(&timerlist_lock, flags);
- return;
-
-bug:
- printk("bug: kernel timer added twice at %p.\n",
- __builtin_return_address(0));
- goto out;
}

static inline int detach_timer(struct timer_list *timer)
@@ -873,28 +863,30 @@
struct task_struct *tsk = current; \
struct wait_queue wait = { tsk, NULL };

-#define DOWN_HEAD(task_state) \
- \
- \
- tsk->state = (task_state); \
- add_wait_queue(&sem->wait, &wait); \
- \
- /* \
- * Ok, we're set up. sem->count is known to be less than zero \
- * so we must wait. \
- * \
- * We can let go the lock for purposes of waiting. \
- * We re-acquire it after awaking so as to protect \
- * all semaphore operations. \
- * \
- * If "up()" is called before we call waking_non_zero() then \
- * we will catch it right away. If it is called later then \
- * we will have to go through a wakeup cycle to catch it. \
- * \
- * Multiple waiters contend for the semaphore lock to see \
- * who gets to gate through and who has to wait some more. \
- */ \
- for (;;) {
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ tsk->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) { \
+ if (waking_non_zero(sem, tsk)) /* are we waking up? */ \
+ break; /* yes, exit loop */

#define DOWN_TAIL(task_state) \
tsk->state = (task_state); \
@@ -906,8 +898,6 @@
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
- if (waking_non_zero(sem))
- break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
@@ -917,23 +907,15 @@
DOWN_VAR
int ret = 0;
DOWN_HEAD(TASK_INTERRUPTIBLE)
-
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret)
+ if (signal_pending(tsk))
{
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
+ ret = -EINTR; /* interrupted */
+ atomic_inc(&sem->count); /* give up on down operation */
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
- return waking_non_zero_trylock(sem);
}

#define SLEEP_ON_VAR \
--- pristine/net/core/filter.c Tue Feb 23 18:45:45 1999
+++ linux-2.2.2-axp/net/core/filter.c Tue Feb 23 20:16:35 1999
@@ -11,8 +11,6 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
- * Andi Kleen - Fix a few bad bugs and races.
*/

#include <linux/config.h>
@@ -38,22 +36,6 @@
#include <asm/uaccess.h>
#include <linux/filter.h>

-/* No hurry in this branch */
-
-static u8 *load_pointer(struct sk_buff *skb, int k)
-{
- u8 *ptr = NULL;
-
- if (k>=SKF_NET_OFF)
- ptr = skb->nh.raw + k - SKF_NET_OFF;
- else if (k>=SKF_LL_OFF)
- ptr = skb->mac.raw + k - SKF_LL_OFF;
-
- if (ptr >= skb->head && ptr < skb->tail)
- return ptr;
- return NULL;
-}
-
/*
* Decode and apply filter instructions to the skb->data.
* Return length to keep, 0 for none. skb is the data we are
@@ -61,19 +43,15 @@
* len is the number of filter blocks in the array.
*/

-int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int flen)
{
- unsigned char *data = skb->data;
- /* len is UNSIGNED. Byte wide insns relies only on implicit
- type casts to prevent reading arbitrary memory locations.
- */
- unsigned int len = skb->len;
struct sock_filter *fentry; /* We walk down these */
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
int k;
int pc;
+ int *t;

/*
* Process array of filter instructions.
@@ -82,75 +60,53 @@
for(pc = 0; pc < flen; pc++)
{
fentry = &filter[pc];
+ if(fentry->code & BPF_X)
+ t=&X;
+ else
+ t=&fentry->k;

switch(fentry->code)
{
case BPF_ALU|BPF_ADD|BPF_X:
- A += X;
- continue;
-
case BPF_ALU|BPF_ADD|BPF_K:
- A += fentry->k;
+ A += *t;
continue;

case BPF_ALU|BPF_SUB|BPF_X:
- A -= X;
- continue;
-
case BPF_ALU|BPF_SUB|BPF_K:
- A -= fentry->k;
+ A -= *t;
continue;

case BPF_ALU|BPF_MUL|BPF_X:
- A *= X;
- continue;
-
case BPF_ALU|BPF_MUL|BPF_K:
- A *= X;
+ A *= *t;
continue;

case BPF_ALU|BPF_DIV|BPF_X:
- if(X == 0)
- return (0);
- A /= X;
- continue;
-
case BPF_ALU|BPF_DIV|BPF_K:
- if(fentry->k == 0)
+ if(*t == 0)
return (0);
- A /= fentry->k;
+ A /= *t;
continue;

case BPF_ALU|BPF_AND|BPF_X:
- A &= X;
- continue;
-
case BPF_ALU|BPF_AND|BPF_K:
- A &= fentry->k;
+ A &= *t;
continue;

case BPF_ALU|BPF_OR|BPF_X:
- A |= X;
- continue;
-
case BPF_ALU|BPF_OR|BPF_K:
- A |= fentry->k;
+ A |= *t;
continue;

case BPF_ALU|BPF_LSH|BPF_X:
- A <<= X;
- continue;
-
case BPF_ALU|BPF_LSH|BPF_K:
- A <<= fentry->k;
+ A <<= *t;
continue;

case BPF_ALU|BPF_RSH|BPF_X:
- A >>= X;
- continue;
-
case BPF_ALU|BPF_RSH|BPF_K:
- A >>= fentry->k;
+ A >>= *t;
continue;

case BPF_ALU|BPF_NEG:
@@ -192,62 +148,26 @@
case BPF_JMP|BPF_JSET|BPF_X:
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
-
case BPF_LD|BPF_W|BPF_ABS:
k = fentry->k;
-load_w:
- if(k+sizeof(u32) <= len) {
- A = ntohl(*(u32*)&data[k]);
- continue;
- }
- if (k<0) {
- u8 *ptr;
-
- if (k>=SKF_AD_OFF)
- break;
- if ((ptr = load_pointer(skb, k)) != NULL) {
- A = ntohl(*(u32*)ptr);
- continue;
- }
- }
- return 0;
+ if(k + sizeof(long) > len)
+ return (0);
+ A = ntohl(*(long*)&data[k]);
+ continue;

case BPF_LD|BPF_H|BPF_ABS:
k = fentry->k;
-load_h:
- if(k + sizeof(u16) <= len) {
- A = ntohs(*(u16*)&data[k]);
- continue;
- }
- if (k<0) {
- u8 *ptr;
-
- if (k>=SKF_AD_OFF)
- break;
- if ((ptr = load_pointer(skb, k)) != NULL) {
- A = ntohs(*(u16*)ptr);
- continue;
- }
- }
- return 0;
+ if(k + sizeof(short) > len)
+ return (0);
+ A = ntohs(*(short*)&data[k]);
+ continue;

case BPF_LD|BPF_B|BPF_ABS:
k = fentry->k;
-load_b:
- if(k < len) {
- A = data[k];
- continue;
- }
- if (k<0) {
- u8 *ptr;
-
- if (k>=SKF_AD_OFF)
- break;
- if ((ptr = load_pointer(skb, k)) != NULL) {
- A = *ptr;
- continue;
- }
- }
+ if(k >= len)
+ return (0);
+ A = data[k];
+ continue;

case BPF_LD|BPF_W|BPF_LEN:
A = len;
@@ -257,23 +177,35 @@
X = len;
continue;

- case BPF_LD|BPF_W|BPF_IND:
+ case BPF_LD|BPF_W|BPF_IND:
k = X + fentry->k;
- goto load_w;
+ if(k + sizeof(u32) > len)
+ return (0);
+ A = ntohl(*(u32 *)&data[k]);
+ continue;

case BPF_LD|BPF_H|BPF_IND:
k = X + fentry->k;
- goto load_h;
+ if(k + sizeof(u16) > len)
+ return (0);
+ A = ntohs(*(u16*)&data[k]);
+ continue;

case BPF_LD|BPF_B|BPF_IND:
k = X + fentry->k;
- goto load_b;
+ if(k >= len)
+ return (0);
+ A = data[k];
+ continue;

case BPF_LDX|BPF_B|BPF_MSH:
+ /*
+ * Hack for BPF to handle TOS etc
+ */
k = fentry->k;
if(k >= len)
return (0);
- X = (data[k] & 0xf) << 2;
+ X = (data[fentry->k] & 0xf) << 2;
continue;

case BPF_LD|BPF_IMM:
@@ -284,7 +216,7 @@
X = fentry->k;
continue;

- case BPF_LD|BPF_MEM:
+ case BPF_LD|BPF_MEM:
A = mem[fentry->k];
continue;

@@ -314,29 +246,15 @@
mem[fentry->k] = X;
continue;

+
+
default:
/* Invalid instruction counts as RET */
return (0);
}
-
- /* Handle ancillary data, which are impossible
- (or very difficult) to get parsing packet contents.
- */
- switch (k-SKF_AD_OFF) {
- case SKF_AD_PROTOCOL:
- A = htons(skb->protocol);
- continue;
- case SKF_AD_PKTTYPE:
- A = skb->pkt_type;
- continue;
- case SKF_AD_IFINDEX:
- A = skb->dev->ifindex;
- continue;
- default:
- return 0;
- }
}

+ printk(KERN_ERR "Filter ruleset ran off the end.\n");
return (0);
}

@@ -361,17 +279,13 @@

ftest = &filter[pc];
if(BPF_CLASS(ftest->code) == BPF_JMP)
- {
+ {
/*
* But they mustn't jump off the end.
*/
if(BPF_OP(ftest->code) == BPF_JA)
{
- /* Note, the large ftest->k might cause
- loops. Compare this with conditional
- jumps below, where offsets are limited. --ANK (981016)
- */
- if (ftest->k >= (unsigned)(flen-pc-1))
+ if(pc + ftest->k + 1>= (unsigned)flen)
return (-EINVAL);
}
else
@@ -388,18 +302,17 @@
* Check that memory operations use valid addresses.
*/

- if (ftest->k >= BPF_MEMWORDS)
+ if(ftest->k <0 || ftest->k >= BPF_MEMWORDS)
{
/*
* But it might not be a memory operation...
*/
- switch (ftest->code) {
- case BPF_ST:
- case BPF_STX:
- case BPF_LD|BPF_MEM:
- case BPF_LDX|BPF_MEM:
+
+ if (BPF_CLASS(ftest->code) == BPF_ST)
return -EINVAL;
- }
+ if((BPF_CLASS(ftest->code) == BPF_LD) &&
+ (BPF_MODE(ftest->code) == BPF_MEM))
+ return (-EINVAL);
}
}

@@ -419,35 +332,34 @@

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
- struct sk_filter *fp;
- unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ struct sock_filter *fp, *old_filter;
+ int fsize = sizeof(struct sock_filter) * fprog->len;
int err;

/* Make sure new filter is there and in the right amounts. */
- if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
+ if(fprog->filter == NULL || fprog->len == 0 || fsize > BPF_MAXINSNS)
return (-EINVAL);

- fp = (struct sk_filter *)sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
- if(fp == NULL)
- return (-ENOMEM);
-
- if (copy_from_user(fp->insns, fprog->filter, fsize)) {
- sock_kfree_s(sk, fp, fsize+sizeof(*fp));
- return -EFAULT;
- }
+ if((err = sk_chk_filter(fprog->filter, fprog->len))==0)
+ {
+ /* If existing filter, remove it first */
+ if(sk->filter)
+ {
+ old_filter = sk->filter_data;
+ kfree_s(old_filter, (sizeof(old_filter) * sk->filter));
+ sk->filter_data = NULL;
+ }

- atomic_set(&fp->refcnt, 1);
- fp->len = fprog->len;
+ fp = (struct sock_filter *)kmalloc(fsize, GFP_KERNEL);
+ if(fp == NULL)
+ return (-ENOMEM);

- if ((err = sk_chk_filter(fp->insns, fp->len))==0) {
- struct sk_filter *old_fp = sk->filter;
- sk->filter = fp;
- wmb();
- fp = old_fp;
- }
+ memset(fp,0,sizeof(*fp));
+ memcpy(fp, fprog->filter, fsize); /* Copy instructions */

- if (fp)
- sk_filter_release(sk, fp);
+ sk->filter = fprog->len; /* Number of filter blocks */
+ sk->filter_data = fp; /* Filter instructions */
+ }

return (err);
}
--- pristine/net/core/sock.c Tue Feb 23 18:45:45 1999
+++ linux-2.2.2-axp/net/core/sock.c Tue Feb 23 20:16:35 1999
@@ -155,6 +155,10 @@
int err;
struct linger ling;
int ret = 0;
+
+#ifdef CONFIG_FILTER
+ struct sock_fprog fprog;
+#endif

/*
* Options without arguments
@@ -252,13 +256,12 @@

case SO_PRIORITY:
if (val >= 0 && val <= 7)
- {
- if(val==7 && !capable(CAP_NET_ADMIN))
- return -EPERM;
sk->priority = val;
- }
+ else
+ return(-EINVAL);
break;

+
case SO_LINGER:
if(optlen<sizeof(ling))
return -EINVAL; /* 1003.1g */
@@ -307,12 +310,10 @@
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
if (copy_from_user(devname, optval, optlen))
- return -EFAULT;
-
+ return -EFAULT;
+
/* Remove any cached route for this socket. */
- lock_sock(sk);
dst_release(xchg(&sk->dst_cache, NULL));
- release_sock(sk);

if (devname[0] == '\0') {
sk->bound_dev_if = 0;
@@ -330,32 +331,30 @@

#ifdef CONFIG_FILTER
case SO_ATTACH_FILTER:
- ret = -EINVAL;
- if (optlen == sizeof(struct sock_fprog)) {
- struct sock_fprog fprog;
+ if(optlen < sizeof(struct sock_fprog))
+ return -EINVAL;

+ if(copy_from_user(&fprog, optval, sizeof(fprog)))
+ {
ret = -EFAULT;
- if (copy_from_user(&fprog, optval, sizeof(fprog)))
- break;
-
- ret = sk_attach_filter(&fprog, sk);
+ break;
}
+
+ ret = sk_attach_filter(&fprog, sk);
break;

case SO_DETACH_FILTER:
- if(sk->filter) {
- struct sk_filter *filter;
-
- filter = sk->filter;
-
- sk->filter = NULL;
- wmb();
-
- if (filter)
- sk_filter_release(sk, filter);
+ if(sk->filter)
+ {
+ fprog.filter = sk->filter_data;
+ kfree_s(fprog.filter, (sizeof(fprog.filter) * sk->filter));
+ sk->filter_data = NULL;
+ sk->filter = 0;
return 0;
}
- return -ENOENT;
+ else
+ return -EINVAL;
+ break;
#endif
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
@@ -504,16 +503,6 @@
{
if (sk->destruct)
sk->destruct(sk);
-
-#ifdef CONFIG_FILTER
- if (sk->filter) {
- sk_filter_release(sk, sk->filter);
- sk->filter = NULL;
- }
-#endif
-
- if (atomic_read(&sk->omem_alloc))
- printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));

kmem_cache_free(sk_cachep, sk);
}
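
	For completeness: detaching from user space is symmetrical. A
hypothetical snippet; the dummy int is only there because, as far as I
can tell, sock_setsockopt() rejects any optlen smaller than
sizeof(int) before it ever reaches the filter cases:

/* Hypothetical: detach whatever filter is attached to fd. */
static int detach_filter(int fd)
{
	int dummy = 0;	/* value is ignored by SO_DETACH_FILTER */

	return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
			  &dummy, sizeof(dummy));
}
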
--- pristine/net/ipv4/tcp_ipv4.c Tue Feb 23 18:45:46 1999
+++ linux-2.2.2-axp/net/ipv4/tcp_ipv4.c Tue Feb 23 20:16:35 1999
@@ -1323,10 +1323,6 @@
newsk->pair = NULL;
skb_queue_head_init(&newsk->back_log);
skb_queue_head_init(&newsk->error_queue);
-#ifdef CONFIG_FILTER
- if (newsk->filter)
- sk_filter_charge(newsk, newsk->filter);
-#endif

/* Now setup tcp_opt */
newtp = &(newsk->tp_pinfo.af_tcp);
@@ -1557,10 +1553,12 @@

int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
-
#ifdef CONFIG_FILTER
- if (sk->filter && sk_filter(skb, sk->filter))
- goto discard;
+ if (sk->filter)
+ {
+ if (sk_filter(skb, sk->filter_data, sk->filter))
+ goto discard;
+ }
#endif /* CONFIG_FILTER */

/*
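
	The tcp_ipv4.c hunk above follows the reverted calling
convention. sk_filter() itself is not in these diffs; under this
scheme it is presumably a thin wrapper along these lines (an
assumption, shown only to make the call site readable):

/* Assumed shape of sk_filter() with the reverted interface; not part
 * of this patch.  Nonzero return means "discard the packet".
 */
static inline int sk_filter(struct sk_buff *skb,
			    struct sock_filter *filter, int flen)
{
	/* sk_run_filter() returns the number of bytes to keep;
	 * 0 means the filter rejected the packet.
	 */
	return sk_run_filter(skb->data, skb->len, filter, flen) == 0;
}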