Date: Sun, 27 Jun 1999
From: Matthew Harrell <mharrell@sito.saic.com>
Subject: [patch] 2.3.8+ UP masq

This patch gets rid of numerous undefined-lock problems when building 2.3.8+ for
uniprocessor (UP). Since the lock declarations are now surrounded by #ifdef __SMP__,
it made sense to do the same with every place in the masquerading code where those
locks are used.
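
To illustrate, the change at each lock site follows the shape below (just a sketch,
not part of the patch; the declaration shown is an assumption about how the lock is
defined for SMP-only builds, and the helper function is hypothetical):

#ifdef __SMP__
static rwlock_t portfw_lock = RW_LOCK_UNLOCKED;	/* assumed: only declared for SMP */
#endif

static void portfw_example_update(void)		/* hypothetical helper, for illustration */
{
#ifdef __SMP__
	write_lock_bh(&portfw_lock);		/* lock is only taken on SMP builds */
#endif
	/* ... modify the protected list ... */
#ifdef __SMP__
	write_unlock_bh(&portfw_lock);		/* and only released on SMP builds */
#endif
}

On a UP build both the declaration and the lock/unlock calls compile away, so there
is nothing left to reference an undefined symbol.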

-- 
  Matthew Harrell                          To err is human,
  Simulation Technology Division, SAIC     to purr feline.
  mharrell@sito.saic.com
--- linux/net/ipv4/ip_masq.c-ori Sun Jun 27 00:28:58 1999
+++ linux/net/ipv4/ip_masq.c Sun Jun 27 00:29:20 1999
@@ -795,14 +795,18 @@
{
__u16 mport;

+#ifdef __SMP__
spin_lock_irq(&masq_port_lock);
+#endif
/*
* Try the next available port number
*/
mport = htons(masq_port++);
if (masq_port==PORT_MASQ_END) masq_port = PORT_MASQ_BEGIN;

+#ifdef __SMP__
spin_unlock_irq(&masq_port_lock);
+#endif
return mport;
}

--- linux/net/ipv4/ip_masq_mod.c-ori Sun Jun 27 00:30:26 1999
+++ linux/net/ipv4/ip_masq_mod.c Sun Jun 27 00:31:30 1999
@@ -96,28 +96,38 @@
{
struct ip_masq_mod **mmod_p;

+#ifdef __SMP__
write_lock_bh(&masq_mod_lock);
+#endif

for (mmod_p = &ip_masq_mod_lkp_base; *mmod_p ; mmod_p = &(*mmod_p)->next)
if (mmod == (*mmod_p)) {
*mmod_p = mmod->next;
mmod->next = NULL;
+#ifdef __SMP__
write_unlock_bh(&masq_mod_lock);
+#endif
return 0;
}

+#ifdef __SMP__
write_unlock_bh(&masq_mod_lock);
+#endif
return -EINVAL;
}

int ip_masq_mod_lkp_link(struct ip_masq_mod *mmod)
{
+#ifdef __SMP__
write_lock_bh(&masq_mod_lock);
+#endif

mmod->next = ip_masq_mod_lkp_base;
ip_masq_mod_lkp_base=mmod;

+#ifdef __SMP__
write_unlock_bh(&masq_mod_lock);
+#endif
return 0;
}

--- linux/net/ipv4/ip_masq_portfw.c-ori Sun Jun 27 00:50:10 1999
+++ linux/net/ipv4/ip_masq_portfw.c Sun Jun 27 00:55:05 1999
@@ -79,7 +79,9 @@

nent = atomic_read(&mmod_self->mmod_nent);

+#ifdef __SMP__
write_lock_bh(&portfw_lock);
+#endif

for (entry=list->next;entry != list;entry = entry->next) {
n = list_entry(entry, struct ip_portfw, list);
@@ -93,7 +95,9 @@
MOD_DEC_USE_COUNT;
}
}
+#ifdef __SMP__
write_unlock_bh(&portfw_lock);
+#endif

return nent==atomic_read(&mmod_self->mmod_nent)? ESRCH : 0;
}
@@ -109,7 +113,9 @@
struct list_head *e;
struct ip_portfw *n;

+#ifdef __SMP__
write_lock_bh(&portfw_lock);
+#endif

for (prot = 0; prot < 2;prot++) {
l = &portfw_list[prot];
@@ -122,7 +128,9 @@
}
}

+#ifdef __SMP__
write_unlock_bh(&portfw_lock);
+#endif
}

/*
@@ -167,8 +175,9 @@
struct list_head *l, *e;
int count = 0;

-
+#ifdef __SMP__
read_lock_bh(&portfw_lock);
+#endif

l = &portfw_list[prot];

@@ -184,7 +193,9 @@
}
}

+#ifdef __SMP__
read_unlock_bh(&portfw_lock);
+#endif

return count;
}
@@ -227,14 +238,18 @@
atomic_set(&npf->pref_cnt, npf->pref);
INIT_LIST_HEAD(&npf->list);

+#ifdef __SMP__
write_lock_bh(&portfw_lock);
+#endif

/*
* Add at head
*/
list_add(&npf->list, &portfw_list[prot]);

+#ifdef __SMP__
write_unlock_bh(&portfw_lock);
+#endif

ip_masq_mod_inc_nent(mmod_self);
return 0;
@@ -328,7 +343,9 @@
}
pos = 64;

+#ifdef __SMP__
read_lock_bh(&portfw_lock);
+#endif

for(ind = 0; ind < 2; ind++)
{
@@ -354,7 +371,10 @@
}
}
done:
+
+#ifdef __SMP__
read_unlock_bh(&portfw_lock);
+#endif

begin = len - (pos - offset);
*start = buffer + begin;
@@ -391,9 +411,13 @@
rt->u.dst.dev->name,
NIPQUAD(iph->daddr), ntohs(portp[1]));

+#ifdef __SMP__
read_lock(&portfw_lock);
+#endif
pfw = ip_portfw_lookup(iph->protocol, portp[1], iph->daddr, NULL, NULL);
+#ifdef __SMP__
read_unlock(&portfw_lock);
+#endif
return (pfw!=0);
}

@@ -414,7 +438,9 @@
/*
* Lock for writing.
*/
+#ifdef __SMP__
write_lock(&portfw_lock);
+#endif

if ((pf=ip_portfw_lookup(iph->protocol,
portp[1], iph->daddr,
@@ -450,7 +476,9 @@
}
}
out:
+#ifdef __SMP__
write_unlock(&portfw_lock);
+#endif
return ms;
}

--- linux/net/ipv4/ip_masq_mfw.c-ori Sun Jun 27 00:56:17 1999
+++ linux/net/ipv4/ip_masq_mfw.c Sun Jun 27 01:05:53 1999
@@ -193,10 +193,14 @@
if (!mfw_host)
return -ENOMEM;

+#ifdef __SMP__
write_lock_bh(&mfw->lock);
+#endif
list_add(&mfw_host->list, attail? mfw->hosts.prev : &mfw->hosts);
atomic_inc(&mfw->nhosts);
+#ifdef __SMP__
write_unlock_bh(&mfw->lock);
+#endif

return 0;
}
@@ -214,7 +218,9 @@
int n_del = 0;
l = &mfw->hosts;

+#ifdef __SMP__
write_lock_bh(&mfw->lock);
+#endif
for (e=l->next; e!=l; e=e->next)
{
h = list_entry(e, struct ip_masq_mfw_host, list);
@@ -229,7 +235,9 @@
}

}
+#ifdef __SMP__
write_unlock_bh(&mfw->lock);
+#endif
return n_del? 0 : -ESRCH;
}

@@ -344,16 +352,24 @@
struct ip_masq_mfw *mfw;
struct ip_masq_mfw_host *h = NULL;

+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif
mfw = __mfw_get(fwmark);

if (mfw) {
+#ifdef __SMP__
write_lock(&mfw->lock);
+#endif
h = __mfw_sched(mfw, 0);
+#ifdef __SMP__
write_unlock(&mfw->lock);
+#endif
}

+#ifdef __SMP__
read_unlock(&mfw_lock);
+#endif
return h;
}

@@ -382,10 +398,14 @@

for(idx = 0; idx < IP_MASQ_MFW_HSIZE; idx++)
{
+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif
for(mfw = ip_masq_mfw_table[idx]; mfw ; mfw = mfw->next)
{
+#ifdef __SMP__
read_lock_bh(&mfw->lock);
+#endif
l=&mfw->hosts;

for(e=l->next;l!=e;e=e->next) {
@@ -448,7 +468,9 @@
int idx;
struct list_head *l,*e;

+#ifdef __SMP__
write_lock_bh(&mfw_lock);
+#endif
memcpy(local_table, ip_masq_mfw_table, sizeof ip_masq_mfw_table);
memset(ip_masq_mfw_table, 0, sizeof ip_masq_mfw_table);
write_unlock_bh(&mfw_lock);
@@ -559,9 +581,13 @@
*/
down(&mfw_sema);

+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif
mfw = __mfw_get(mu->fwmark);
+#ifdef __SMP__
read_unlock(&mfw_lock);
+#endif

/*
* If first host, create m-entry
@@ -583,9 +609,13 @@
* Already protected by global lock.
*/
if (ret == 0 && atomic_read(&mfw->nhosts) == 1) {
+#ifdef __SMP__
write_lock_bh(&mfw_lock);
+#endif
__mfw_add(mfw);
+#ifdef __SMP__
write_unlock_bh(&mfw_lock);
+#endif
}
if (atomic_read(&mfw->nhosts) == 0) {
mfw_destroy(mfw);
@@ -599,9 +629,13 @@
case IP_MASQ_CMD_DEL:
down(&mfw_sema);

+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif
mfw = __mfw_get(mu->fwmark);
+#ifdef __SMP__
read_unlock(&mfw_lock);
+#endif

if (mfw) {
ret = mfw_delhost(mfw, mu);
@@ -611,9 +645,13 @@
* XXX check logic XXX
*/
if (atomic_read(&mfw->nhosts) == 0) {
+#ifdef __SMP__
write_lock_bh(&mfw_lock);
+#endif
__mfw_del(mfw);
+#ifdef __SMP__
write_unlock_bh(&mfw_lock);
+#endif
mfw_destroy(mfw);
}
} else
@@ -633,11 +671,15 @@
* No need to semaphorize here, main list is not
* modified.
*/
+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif

mfw = __mfw_get(mu->fwmark);
if (mfw) {
+#ifdef __SMP__
write_lock_bh(&mfw->lock);
+#endif

if (mu->flags & IP_MASQ_MFW_SCHED) {
struct ip_masq_mfw_host *h;
@@ -649,10 +691,14 @@
ret = __mfw_edithost(mfw, mu);
}

+#ifdef __SMP__
write_unlock_bh(&mfw->lock);
+#endif
}

+#ifdef __SMP__
read_unlock(&mfw_lock);
+#endif
break;
}
out:
@@ -671,9 +717,13 @@
static int mfw_in_rule(const struct sk_buff *skb, const struct iphdr *iph)
{
int val;
+#ifdef __SMP__
read_lock(&mfw_lock);
+#endif
val = ( __mfw_get(skb->fwmark) != 0);
+#ifdef __SMP__
read_unlock(&mfw_lock);
+#endif
return val;
}
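
In case it helps, the patch should apply from the top of the kernel source tree in
the usual way, e.g. (assuming it was saved as up-masq.diff; the file name is just an
example):

	cd /usr/src/linux
	patch -p1 < up-masq.diff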