From: Ingo Molnar <mingo@redhat.com>
Date: Fri, 28 Sep 2001
Subject: [patch] softirq-2.4.10-B3

On Fri, 28 Sep 2001 kuznet@ms2.inr.ac.ru wrote:

> > - '[ksoftirqd_CPU0]' is confusing on UP systems, changed it to
> > '[ksoftirqd]' instead.
>
> It is useless to argue about preferences, but universal naming scheme
> looks as less confusing yet. :-)

since you are the second one to complain, i'm convinced :) reverted.

i've also added back the BUG() check for smp_processor_id() == cpu.

-B3 attached.

Ingo
--- linux/kernel/ksyms.c.orig Wed Sep 26 17:04:40 2001
+++ linux/kernel/ksyms.c Wed Sep 26 17:04:48 2001
@@ -538,8 +538,6 @@
EXPORT_SYMBOL(tasklet_kill);
EXPORT_SYMBOL(__run_task_queue);
EXPORT_SYMBOL(do_softirq);
-EXPORT_SYMBOL(raise_softirq);
-EXPORT_SYMBOL(cpu_raise_softirq);
EXPORT_SYMBOL(__tasklet_schedule);
EXPORT_SYMBOL(__tasklet_hi_schedule);

--- linux/kernel/sched.c.orig Wed Sep 26 17:04:40 2001
+++ linux/kernel/sched.c Wed Sep 26 17:04:48 2001
@@ -366,6 +366,28 @@
}

/**
+ * unwakeup - undo wakeup if possible.
+ * @p: task
+ * @state: new task state
+ *
+ * Undo a previous wakeup of the specified task - if the process
+ * is not running already. The main interface to be used is
+ * unwakeup_process(); it does a lockless test of whether the task
+ * is on the runqueue.
+ */
+void __unwakeup_process(struct task_struct * p, long state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&runqueue_lock, flags);
+ if (!p->has_cpu && (p != current) && task_on_runqueue(p)) {
+ del_from_runqueue(p);
+ p->state = state;
+ }
+ spin_unlock_irqrestore(&runqueue_lock, flags);
+}
+
+/**
* schedule_timeout - sleep until timeout
* @timeout: timeout value in jiffies
*
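
here is a quick sketch (not part of the patch) of how the new unwakeup_process()
interface is meant to be used: wake a helper thread speculatively, and if the work
turns out to be finished before the helper ever ran, take the wakeup back to save
a context switch - this is exactly what do_softirq() does with ksoftirqd further
down. The helper/work names below are made up:

	/* sketch: a speculative wakeup that can be undone */
	static void kick_helper(struct task_struct *helper)
	{
		wake_up_process(helper);	/* helper may run soon */

		if (finish_work_inline())	/* hypothetical fast path */
			/*
			 * Work already done - put the helper back to sleep,
			 * provided it is still on the runqueue and has not
			 * started running on a CPU.
			 */
			unwakeup_process(helper, TASK_INTERRUPTIBLE);
	}
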
--- linux/kernel/softirq.c.orig Wed Sep 26 17:04:40 2001
+++ linux/kernel/softirq.c Fri Sep 28 18:16:21 2001
@@ -58,12 +58,35 @@
wake_up_process(tsk);
}

+/*
+ * If the softirqs were fully handled after ksoftirqd was woken
+ * up, then try to undo the wakeup.
+ */
+static inline void unwakeup_softirqd(unsigned cpu)
+{
+ struct task_struct * tsk = ksoftirqd_task(cpu);
+
+ if (tsk)
+ unwakeup_process(tsk, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * We restart softirq processing MAX_SOFTIRQ_RESTART times,
+ * and we fall back to softirqd after that.
+ *
+ * This number has been established via experimentation.
+ * The two things to balance are latency and fairness -
+ * we want to handle softirqs as soon as possible, but they
+ * should not be able to lock up the box.
+ */
+#define MAX_SOFTIRQ_RESTART 10
+
asmlinkage void do_softirq()
{
+ int max_restart = MAX_SOFTIRQ_RESTART;
int cpu = smp_processor_id();
__u32 pending;
long flags;
- __u32 mask;

if (in_interrupt())
return;
@@ -75,7 +98,6 @@
if (pending) {
struct softirq_action *h;

- mask = ~pending;
local_bh_disable();
restart:
/* Reset the pending bitmask before enabling irqs */
@@ -95,55 +117,37 @@
local_irq_disable();

pending = softirq_pending(cpu);
- if (pending & mask) {
- mask &= ~pending;
+ if (pending && --max_restart && (current->need_resched != 1))
goto restart;
- }
__local_bh_enable();

if (pending)
+ /*
+ * In the normal case ksoftirqd is rarely activated, since
+ * the extra scheduling hurts performance.
+ * It's a safety measure: if external load starts
+ * to flood the system with softirqs then we
+ * will migrate softirq work to the softirq thread.
+ */
wakeup_softirqd(cpu);
+ else
+ /*
+ * All softirqs are handled - undo any possible
+ * wakeup of softirqd. This reduces context switch
+ * overhead.
+ */
+ unwakeup_softirqd(cpu);
}

local_irq_restore(flags);
}

-/*
- * This function must run with irq disabled!
- */
-inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
-{
- __cpu_raise_softirq(cpu, nr);
-
- /*
- * If we're in an interrupt or bh, we're done
- * (this also catches bh-disabled code). We will
- * actually run the softirq once we return from
- * the irq or bh.
- *
- * Otherwise we wake up ksoftirqd to make sure we
- * schedule the softirq soon.
- */
- if (!(local_irq_count(cpu) | local_bh_count(cpu)))
- wakeup_softirqd(cpu);
-}
-
-void raise_softirq(unsigned int nr)
-{
- long flags;
-
- local_irq_save(flags);
- cpu_raise_softirq(smp_processor_id(), nr);
- local_irq_restore(flags);
-}
-
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
softirq_vec[nr].data = data;
softirq_vec[nr].action = action;
}

-
/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
@@ -157,8 +161,9 @@
local_irq_save(flags);
t->next = tasklet_vec[cpu].list;
tasklet_vec[cpu].list = t;
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
local_irq_restore(flags);
+ rerun_softirqs(cpu);
}

void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -169,8 +174,9 @@
local_irq_save(flags);
t->next = tasklet_hi_vec[cpu].list;
tasklet_hi_vec[cpu].list = t;
- cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
local_irq_restore(flags);
+ rerun_softirqs(cpu);
}

static void tasklet_action(struct softirq_action *a)
@@ -241,7 +247,6 @@
}
}

-
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
@@ -268,8 +273,6 @@
clear_bit(TASKLET_STATE_SCHED, &t->state);
}

-
-
/* Old style BHs */

static void (*bh_base[32])(void);
@@ -325,7 +328,7 @@
{
int i;

- for (i=0; i<32; i++)
+ for (i = 0; i < 32; i++)
tasklet_init(bh_task_vec+i, bh_action, i);

open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
@@ -361,38 +364,41 @@

static int ksoftirqd(void * __bind_cpu)
{
- int bind_cpu = *(int *) __bind_cpu;
- int cpu = cpu_logical_map(bind_cpu);
+ int cpu = cpu_logical_map((int)__bind_cpu);

daemonize();
- current->nice = 19;
+
sigfillset(&current->blocked);

/* Migrate to the right CPU */
- current->cpus_allowed = 1UL << cpu;
- while (smp_processor_id() != cpu)
- schedule();
-
- sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
+ current->cpus_allowed = 1 << cpu;

- __set_current_state(TASK_INTERRUPTIBLE);
- mb();
+ sprintf(current->comm, "ksoftirqd CPU%d", cpu);

+ current->nice = 19;
+ schedule();
ksoftirqd_task(cpu) = current;

- for (;;) {
- if (!softirq_pending(cpu))
- schedule();
-
- __set_current_state(TASK_RUNNING);
+ if (smp_processor_id() != cpu)
+ BUG();

+ for (;;) {
while (softirq_pending(cpu)) {
do_softirq();
if (current->need_resched)
- schedule();
+ goto preempt;
}

__set_current_state(TASK_INTERRUPTIBLE);
+ /* This has to be here to make the test IRQ-correct. */
+ barrier();
+ if (!softirq_pending(cpu)) {
+preempt:
+ if (current->counter > 1)
+ current->counter = 1;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
}
}

@@ -400,17 +406,10 @@
{
int cpu;

- for (cpu = 0; cpu < smp_num_cpus; cpu++) {
- if (kernel_thread(ksoftirqd, (void *) &cpu,
+ for (cpu = 0; cpu < smp_num_cpus; cpu++)
+ if (kernel_thread(ksoftirqd, (void *) cpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
- printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
- else {
- while (!ksoftirqd_task(cpu_logical_map(cpu))) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
- }
- }
+ BUG();

return 0;
}
--- linux/include/linux/netdevice.h.orig Wed Sep 26 17:04:36 2001
+++ linux/include/linux/netdevice.h Fri Sep 28 07:44:01 2001
@@ -486,8 +486,9 @@
local_irq_save(flags);
dev->next_sched = softnet_data[cpu].output_queue;
softnet_data[cpu].output_queue = dev;
- cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+ __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ rerun_softirqs(cpu);
}
}

@@ -535,8 +536,9 @@
local_irq_save(flags);
skb->next = softnet_data[cpu].completion_queue;
softnet_data[cpu].completion_queue = skb;
- cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+ __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
local_irq_restore(flags);
+ rerun_softirqs(cpu);
}
}

--- linux/include/linux/interrupt.h.orig Wed Sep 26 17:04:40 2001
+++ linux/include/linux/interrupt.h Fri Sep 28 07:44:01 2001
@@ -74,9 +74,14 @@
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
-#define __cpu_raise_softirq(cpu, nr) do { softirq_pending(cpu) |= 1UL << (nr); } while (0)
-extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
+#define __cpu_raise_softirq(cpu, nr) \
+ do { softirq_pending(cpu) |= 1UL << (nr); } while (0)
+
+#define rerun_softirqs(cpu) \
+do { \
+ if (!(local_irq_count(cpu) | local_bh_count(cpu))) \
+ do_softirq(); \
+} while (0)
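
as a quick sketch (not part of the diff), this is the pattern the driver-side
hunks below follow with these two macros - set the pending bit with irqs
disabled, then re-run softirqs once irqs are back on, unless we are already in
irq/bh context:

	/* sketch: raising NET_TX_SOFTIRQ from driver code under this scheme */
	static inline void example_raise_net_tx(void)
	{
		int cpu = smp_processor_id();
		unsigned long flags;

		local_irq_save(flags);
		__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);	/* only sets the pending bit */
		local_irq_restore(flags);
		rerun_softirqs(cpu);	/* do_softirq() unless in irq/bh context */
	}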



--- linux/include/linux/sched.h.orig Wed Sep 26 17:04:40 2001
+++ linux/include/linux/sched.h Fri Sep 28 07:44:01 2001
@@ -556,6 +556,7 @@

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__unwakeup_process(struct task_struct * p, long state));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
@@ -574,6 +575,13 @@
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_sync_nr(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
+
+#define unwakeup_process(tsk,state) \
+do { \
+ if (task_on_runqueue(tsk)) \
+ __unwakeup_process(tsk,state); \
+} while (0)
+
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);

extern int in_group_p(gid_t);
--- linux/include/asm-mips/softirq.h.orig Wed Sep 26 20:58:00 2001
+++ linux/include/asm-mips/softirq.h Wed Sep 26 20:58:07 2001
@@ -40,6 +40,4 @@

#define in_softirq() (local_bh_count(smp_processor_id()) != 0)

-#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
-
#endif /* _ASM_SOFTIRQ_H */
--- linux/include/asm-mips64/softirq.h.orig Wed Sep 26 20:58:20 2001
+++ linux/include/asm-mips64/softirq.h Wed Sep 26 20:58:26 2001
@@ -39,19 +39,4 @@

#define in_softirq() (local_bh_count(smp_processor_id()) != 0)

-extern inline void __cpu_raise_softirq(int cpu, int nr)
-{
- unsigned int *m = (unsigned int *) &softirq_pending(cpu);
- unsigned int temp;
-
- __asm__ __volatile__(
- "1:\tll\t%0, %1\t\t\t# __cpu_raise_softirq\n\t"
- "or\t%0, %2\n\t"
- "sc\t%0, %1\n\t"
- "beqz\t%0, 1b"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << nr), "m" (*m)
- : "memory");
-}
-
#endif /* _ASM_SOFTIRQ_H */
--- linux/net/core/dev.c.orig Wed Sep 26 17:04:41 2001
+++ linux/net/core/dev.c Wed Sep 26 17:04:48 2001
@@ -1218,8 +1218,9 @@
dev_hold(skb->dev);
__skb_queue_tail(&queue->input_pkt_queue,skb);
/* Runs from irqs or BH's, no need to wake BH */
- cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
+ __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
local_irq_restore(flags);
+ rerun_softirqs(this_cpu);
#ifndef OFFLINE_SAMPLE
get_sample_stats(this_cpu);
#endif
@@ -1529,8 +1530,9 @@
local_irq_disable();
netdev_rx_stat[this_cpu].time_squeeze++;
/* This already runs in BH context, no need to wake up BH's */
- cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
+ __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
local_irq_enable();
+ rerun_softirqs(this_cpu);

NET_PROFILE_LEAVE(softnet_process);
return;
--- linux/linux/include/linux/netdevice.h.orig Wed Sep 26 21:14:55 2001
+++ linux/linux/include/linux/netdevice.h Thu Sep 27 07:51:00 2001
@@ -390,6 +390,8 @@
unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
int (*accept_fastpath)(struct net_device *, struct dst_entry*);
+#define HAVE_POLL_CONTROLLER
+ void (*poll_controller)(struct net_device *dev);

/* open/release and usage marking */
struct module *owner;
--- linux/linux/drivers/net/tulip/tulip_core.c.orig Wed Sep 26 21:14:58 2001
+++ linux/linux/drivers/net/tulip/tulip_core.c Fri Sep 28 09:23:22 2001
@@ -243,6 +243,7 @@
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
+static void poll_tulip(struct net_device *dev);



@@ -1671,6 +1672,9 @@
dev->get_stats = tulip_get_stats;
dev->do_ioctl = private_ioctl;
dev->set_multicast_list = set_rx_mode;
+#ifdef HAVE_POLL_CONTROLLER
+ dev->poll_controller = &poll_tulip;
+#endif

if (register_netdev(dev))
goto err_out_free_ring;
@@ -1839,6 +1843,24 @@

/* pci_power_off (pdev, -1); */
}
+
+
+#ifdef HAVE_POLL_CONTROLLER
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_tulip (struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ tulip_interrupt (dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+
+#endif


static struct pci_driver tulip_driver = {
--- linux/linux/drivers/net/eepro100.c.orig Wed Sep 26 21:14:58 2001
+++ linux/linux/drivers/net/eepro100.c Thu Sep 27 08:11:41 2001
@@ -542,6 +542,7 @@
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void poll_speedo (struct net_device *dev);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -837,6 +838,9 @@
dev->get_stats = &speedo_get_stats;
dev->set_multicast_list = &set_rx_mode;
dev->do_ioctl = &speedo_ioctl;
+#ifdef HAVE_POLL_CONTROLLER
+ dev->poll_controller = &poll_speedo;
+#endif

return 0;
}
@@ -1594,6 +1598,23 @@
clear_bit(0, (void*)&sp->in_interrupt);
return;
}
+
+#ifdef HAVE_POLL_CONTROLLER
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_speedo (struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ speedo_interrupt (dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+
+#endif

static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
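
the tulip and eepro100 hunks above show everything a driver needs to support
netconsole; as a sketch, for some hypothetical driver 'foo' with interrupt
handler foo_interrupt():

	#ifdef HAVE_POLL_CONTROLLER
	/*
	 * Polling 'interrupt' for netconsole: run the normal interrupt
	 * handler with the IRQ line disabled, so it cannot re-enter.
	 */
	static void poll_foo(struct net_device *dev)
	{
		disable_irq(dev->irq);
		foo_interrupt(dev->irq, dev, NULL);
		enable_irq(dev->irq);
	}
	#endif

	/* ... and in the driver's probe/init path: */
	#ifdef HAVE_POLL_CONTROLLER
		dev->poll_controller = &poll_foo;
	#endif
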
--- linux/linux/drivers/net/netconsole.c.orig Wed Sep 26 21:15:15 2001
+++ linux/linux/drivers/net/netconsole.c Thu Sep 27 16:12:10 2001
@@ -0,0 +1,331 @@
+/*
+ * linux/drivers/net/netconsole.c
+ *
+ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the implementation of an IRQ-safe, crash-safe
+ * kernel console implementation that outputs kernel messages to the
+ * network.
+ *
+ * Modification history:
+ *
+ * 2001-09-17 started by Ingo Molnar.
+ */
+
+/****************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ ****************************************************************/
+
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <linux/console.h>
+#include <linux/smp_lock.h>
+#include <linux/netdevice.h>
+#include <linux/tty_driver.h>
+#include <linux/etherdevice.h>
+
+static struct net_device *netconsole_dev;
+static u16 source_port, target_port;
+static u32 source_ip, target_ip;
+static unsigned char daddr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+#define NETCONSOLE_VERSION 0x01
+#define HEADER_LEN 5
+
+#define MAX_UDP_CHUNK 1460
+#define MAX_PRINT_CHUNK (MAX_UDP_CHUNK-HEADER_LEN)
+
+/*
+ * We maintain a small pool of fully-sized skbs,
+ * to make sure the message gets out even in
+ * extreme OOM situations.
+ */
+#define MAX_NETCONSOLE_SKBS 32
+
+static spinlock_t netconsole_lock = SPIN_LOCK_UNLOCKED;
+static int nr_netconsole_skbs;
+static struct sk_buff *netconsole_skbs;
+
+#define MAX_SKB_SIZE \
+ (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
+ sizeof(struct iphdr) + sizeof(struct ethhdr))
+
+static void __refill_netconsole_skbs(void)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&netconsole_lock, flags);
+ while (nr_netconsole_skbs < MAX_NETCONSOLE_SKBS) {
+ skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
+ if (!skb)
+ break;
+ if (netconsole_skbs)
+ skb->next = netconsole_skbs;
+ else
+ skb->next = NULL;
+ netconsole_skbs = skb;
+ nr_netconsole_skbs++;
+ }
+ spin_unlock_irqrestore(&netconsole_lock, flags);
+}
+
+static struct sk_buff * get_netconsole_skb(void)
+{
+ struct sk_buff *skb;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&netconsole_lock, flags);
+ skb = netconsole_skbs;
+ if (skb) {
+ /* unlink only when the pool is non-empty, so a NULL skb is never dereferenced */
+ netconsole_skbs = skb->next;
+ skb->next = NULL;
+ nr_netconsole_skbs--;
+ }
+ spin_unlock_irqrestore(&netconsole_lock, flags);
+
+ return skb;
+}
+
+static spinlock_t sequence_lock = SPIN_LOCK_UNLOCKED;
+static unsigned int offset;
+
+static void send_netconsole_skb(struct net_device *dev, const char *msg, unsigned int msg_len)
+{
+ int total_len, eth_len, ip_len, udp_len;
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+
+ udp_len = msg_len + HEADER_LEN + sizeof(*udph);
+ ip_len = eth_len = udp_len + sizeof(*iph);
+ total_len = eth_len + ETH_HLEN;
+
+ if (nr_netconsole_skbs < MAX_NETCONSOLE_SKBS)
+ __refill_netconsole_skbs();
+
+ skb = alloc_skb(total_len, GFP_ATOMIC);
+ if (!skb) {
+ skb = get_netconsole_skb();
+ if (!skb)
+ /* tough! */
+ return;
+ }
+
+ atomic_set(&skb->users, 1);
+ skb_reserve(skb, total_len - msg_len - HEADER_LEN);
+ skb->data[0] = NETCONSOLE_VERSION;
+
+ spin_lock_irqsave(&sequence_lock, flags);
+ put_unaligned(htonl(offset), (u32 *) (skb->data + 1));
+ offset += msg_len;
+ spin_unlock_irqrestore(&sequence_lock, flags);
+
+ memcpy(skb->data + HEADER_LEN, msg, msg_len);
+ skb->len += msg_len + HEADER_LEN;
+
+ udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
+ udph->source = source_port;
+ udph->dest = target_port;
+ udph->len = htons(udp_len);
+ udph->check = 0;
+
+ iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
+
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->tos = 0;
+ iph->tot_len = htons(ip_len);
+ iph->id = 0;
+ iph->frag_off = 0;
+ iph->ttl = 64;
+ iph->protocol = IPPROTO_UDP;
+ iph->check = 0;
+ iph->saddr = source_ip;
+ iph->daddr = target_ip;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+
+ eth->h_proto = htons(ETH_P_IP);
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_dest, daddr, dev->addr_len);
+
+repeat:
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+
+ if (netif_queue_stopped(dev)) {
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+
+ dev->poll_controller(dev);
+ goto repeat;
+ }
+
+ dev->hard_start_xmit(skb, dev);
+
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+}
+
+static void write_netconsole_msg(struct console *con, const char *msg, unsigned int msg_len)
+{
+ int len, left;
+ struct net_device *dev;
+
+ dev = netconsole_dev;
+ if (!dev)
+ return;
+
+ if (dev->poll_controller && netif_running(dev)) {
+ unsigned long flags;
+
+ __save_flags(flags);
+ __cli();
+ left = msg_len;
+repeat:
+ if (left > MAX_PRINT_CHUNK)
+ len = MAX_PRINT_CHUNK;
+ else
+ len = left;
+ send_netconsole_skb(dev, msg, len);
+ msg += len;
+ left -= len;
+ if (left)
+ goto repeat;
+ __restore_flags(flags);
+ }
+}
+
+static char *dev;
+static int target_eth_byte0 = 255;
+static int target_eth_byte1 = 255;
+static int target_eth_byte2 = 255;
+static int target_eth_byte3 = 255;
+static int target_eth_byte4 = 255;
+static int target_eth_byte5 = 255;
+
+MODULE_PARM(target_ip, "i");
+MODULE_PARM(target_eth_byte0, "i");
+MODULE_PARM(target_eth_byte1, "i");
+MODULE_PARM(target_eth_byte2, "i");
+MODULE_PARM(target_eth_byte3, "i");
+MODULE_PARM(target_eth_byte4, "i");
+MODULE_PARM(target_eth_byte5, "i");
+MODULE_PARM(source_port, "h");
+MODULE_PARM(target_port, "h");
+MODULE_PARM(dev, "s");
+
+static struct console netconsole =
+ { flags: CON_ENABLED, write: write_netconsole_msg };
+
+static int init_netconsole(void)
+{
+ struct net_device *ndev = NULL;
+ struct in_device *in_dev;
+
+ printk(KERN_INFO "netconsole: using network device <%s>\n", dev);
+ // this will be valid once the device goes up.
+ if (dev)
+ ndev = dev_get_by_name(dev);
+ if (!ndev) {
+ printk(KERN_ERR "netconsole: network device %s does not exist, aborting.\n", dev);
+ return -1;
+ }
+ if (!ndev->poll_controller) {
+ printk(KERN_ERR "netconsole: %s's network driver does not implement netlogging yet, aborting.\n", dev);
+ return -1;
+ }
+ in_dev = in_dev_get(ndev);
+ if (!in_dev) {
+ printk(KERN_ERR "netconsole: network device %s is not an IP protocol device, aborting.\n", dev);
+ return -1;
+ }
+ source_ip = ntohl(in_dev->ifa_list->ifa_local);
+ if (!source_ip) {
+ printk(KERN_ERR "netconsole: network device %s has no local address, aborting.\n", dev);
+ return -1;
+ }
+#define IP(x) ((char *)&source_ip)[x]
+ printk(KERN_INFO "netconsole: using source IP %i.%i.%i.%i\n",
+ IP(3), IP(2), IP(1), IP(0));
+#undef IP
+ source_ip = htonl(source_ip);
+ if (!target_ip) {
+ printk(KERN_ERR "netconsole: target_ip parameter not specified, aborting.\n");
+ return -1;
+ }
+#define IP(x) ((char *)&target_ip)[x]
+ printk(KERN_INFO "netconsole: using target IP %i.%i.%i.%i\n",
+ IP(3), IP(2), IP(1), IP(0));
+#undef IP
+ target_ip = htonl(target_ip);
+ if (!source_port) {
+ printk(KERN_ERR "netconsole: source_port parameter not specified, aborting.\n");
+ return -1;
+ }
+ printk(KERN_INFO "netconsole: using source UDP port: %i\n", source_port);
+ source_port = htons(source_port);
+ if (!target_port) {
+ printk(KERN_ERR "netconsole: target_port parameter not specified, aborting.\n");
+ return -1;
+ }
+ printk(KERN_INFO "netconsole: using target UDP port: %i\n", target_port);
+ target_port = htons(target_port);
+
+ daddr[0] = target_eth_byte0;
+ daddr[1] = target_eth_byte1;
+ daddr[2] = target_eth_byte2;
+ daddr[3] = target_eth_byte3;
+ daddr[4] = target_eth_byte4;
+ daddr[5] = target_eth_byte5;
+
+ if ((daddr[0] & daddr[1] & daddr[2] & daddr[3] & daddr[4] & daddr[5]) == 255)
+ printk(KERN_INFO "netconsole: using broadcast ethernet frames to send packets.\n");
+ else
+ printk(KERN_INFO "netconsole: using target ethernet address %02x:%02x:%02x:%02x:%02x:%02x.\n", daddr[0], daddr[1], daddr[2], daddr[3], daddr[4], daddr[5]);
+
+ netconsole_dev = ndev;
+#define STARTUP_MSG "[...network console startup...]\n"
+ write_netconsole_msg(NULL, STARTUP_MSG, strlen(STARTUP_MSG));
+
+ register_console(&netconsole);
+ printk(KERN_INFO "netconsole: network logging started up successfully!\n");
+ return 0;
+}
+
+static void cleanup_netconsole(void)
+{
+ printk(KERN_INFO "netconsole: network logging shut down.\n");
+ unregister_console(&netconsole);
+
+#define SHUTDOWN_MSG "[...network console shutdown...]\n"
+ write_netconsole_msg(NULL, SHUTDOWN_MSG, strlen(SHUTDOWN_MSG));
+ netconsole_dev = NULL;
+}
+
+module_init(init_netconsole);
+module_exit(cleanup_netconsole);
+
+int dummy = MAX_SKB_SIZE;
--- linux/linux/drivers/net/Makefile.orig Wed Sep 26 21:14:58 2001
+++ linux/linux/drivers/net/Makefile Wed Sep 26 21:15:15 2001
@@ -216,6 +216,8 @@
obj-y += ../acorn/net/acorn-net.o
endif

+obj-$(CONFIG_NETCONSOLE) += netconsole.o
+
#
# HIPPI adapters
#
--- linux/linux/drivers/net/Config.in.orig Wed Sep 26 21:14:58 2001
+++ linux/linux/drivers/net/Config.in Wed Sep 26 21:15:15 2001
@@ -248,6 +248,8 @@
dep_tristate ' SysKonnect FDDI PCI support' CONFIG_SKFP $CONFIG_PCI
fi

+dep_tristate 'Network logging support' CONFIG_NETCONSOLE
+
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
if [ "$CONFIG_INET" = "y" ]; then
bool 'HIPPI driver support (EXPERIMENTAL)' CONFIG_HIPPI
--- linux/linux/Documentation/networking/netlogging.txt.orig Wed Sep 26 21:15:15 2001
+++ linux/linux/Documentation/networking/netlogging.txt Wed Sep 26 21:15:15 2001
@@ -0,0 +1,46 @@
+
+started by Ingo Molnar <mingo@redhat.com>, 2001.09.17
+
+the network console can be configured over the module interface,
+by specifying the dev=, target_ip=, source_port=, target_port=
+module parameters. Sample use:
+
+insmod netconsole dev=eth0 target_ip=0x0a000701 \
+ source_port=6666 target_port=6666
+
+IP addresses must be specified in hexadecimal numbers. The
+above example uses target host IP 10.0.7.1.
+
+if the module is loaded then all kernel messages are sent to the
+target host via UDP packets. The remote host should run the
+client-side 'netconsole' daemon to display & log the messages.
+
+WARNING: the default setting uses the broadcast ethernet address
+to send packets, which can cause increased load on other systems
+on the same ethernet segment. If this is not desired, then the
+target ethernet address can be specified via the target_eth_byte0,
+target_eth_byte1 ... byte5 module parameters. Eg.:
+
+insmod netconsole dev=eth0 target_ip=0x0a000701 \
+ source_port=6666 target_port=6666 \
+ target_eth_byte0=0x00 \
+ target_eth_byte1=0x02 \
+ target_eth_byte2=0xA5 \
+ target_eth_byte3=0x13 \
+ target_eth_byte4=0x51 \
+ target_eth_byte5=0xDD
+
+will instruct the netconsole code to use target ethernet address
+00:02:A5:13:51:DD.
+
+NOTE: the network device (eth0 in the above case) can run any kind
+of other network traffic, netconsole is not intrusive. Netconsole
+might cause slight delays in other traffic if the volume of kernel
+messages is high, but should have no other impact.
+
+netconsole was designed to be as instantaneous as possible, to
+enable the logging of even the most critical kernel bugs. It works
+from IRQ contexts as well, and does not enable interrupts while
+sending packets. Due to these unique needs, configuration cannot
+be more automatic, and some fundamental limitations will remain:
+only IP networks, UDP packets and ethernet devices are supported.
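
a note on the hex format used above (not part of the patch): target_ip is a plain
integer with the first octet of the dotted-quad address in the most significant
byte, so 10.0.7.1 becomes 0x0a000701:

	/* illustration: encoding 10.0.7.1 for the target_ip= parameter */
	unsigned int ip = (10 << 24) | (0 << 16) | (7 << 8) | 1;	/* == 0x0a000701 */
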
--- linux/linux/Documentation/Configure.help.orig Wed Sep 26 21:14:55 2001
+++ linux/linux/Documentation/Configure.help Wed Sep 26 21:15:15 2001
@@ -9997,6 +9997,11 @@
say M here and read Documentation/modules.txt. This is recommended.
The module will be called skfp.o.

+Network logging support
+CONFIG_NETCONSOLE
+ If you want to log kernel messages over the network, then say
+ "M" here. See Documentation/networking/netlogging.txt for details.
+
HIgh Performance Parallel Interface support (EXPERIMENTAL)
CONFIG_HIPPI
HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and