Subject: [PATCH]: 2.5.1pre9 change several if (x) BUG to BUG_ON(x)


Yes, it's only that; even a trained monkey could make this patch, but I think
it is a good way to get people comfortable with BUG_ON.
I have converted every BUG() I could to BUG_ON(), except under the arch,
include, net, fs and drivers directories.
If you apply this patch I'll convert those directories as well.
Nothing really changes semantically, so it is completely safe to apply the patch.
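
Since the whole patch hinges on one macro, here is a minimal userspace sketch
of the pattern for anyone who hasn't met it yet. The macro bodies below are
stand-ins written for illustration, not the kernel's exact definitions: the
in-kernel BUG() reports the file and line and traps rather than calling
abort(), and BUG_ON(cond) is essentially do { if (cond) BUG(); } while (0):

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in: the real kernel BUG() prints the file and line
 * and then executes a trapping instruction. */
#define BUG() do { \
        fprintf(stderr, "BUG at %s:%d!\n", __FILE__, __LINE__); \
        abort(); \
} while (0)

/* Same shape as the kernel macro: evaluate the condition, die if true. */
#define BUG_ON(condition) do { if (condition) BUG(); } while (0)

int main(void)
{
        void *p = malloc(16);

        /* The open-coded form this patch removes ... */
        if (p == NULL)
                BUG();

        /* ... and the one-line equivalent it substitutes. */
        BUG_ON(p == NULL);

        free(p);
        return 0;
}

One thing reviewers may want to eyeball: a few of the conversions move an
expression with a side effect into the macro argument, for example
BUG_ON(!(shp = shm_lock(id))) in ipc/shm.c. That is only equivalent to the
old code as long as BUG_ON() always evaluates its condition, which the
definition sketched above does.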


diff -u --recursive linux/ipc/msg.c v2.5.1/linux/ipc/msg.c
--- linux/ipc/msg.c Fri Sep 14 23:17:00 2001
+++ v2.5.1/linux/ipc/msg.c Sat Jan 5 17:23:59 2002
@@ -317,8 +317,7 @@
ret = -EEXIST;
} else {
msq = msg_lock(id);
- if(msq==NULL)
- BUG();
+ BUG_ON(msq==NULL);
if (ipcperms(&msq->q_perm, msgflg))
ret = -EACCES;
else
@@ -833,8 +832,7 @@
}
err = PTR_ERR(msg);
if(err == -EAGAIN) {
- if(msqid==-1)
- BUG();
+ BUG_ON(msqid==-1);
list_del(&msr_d.r_list);
if (signal_pending(current))
err=-EINTR;
diff -u --recursive linux/ipc/sem.c v2.5.1/linux/ipc/sem.c
--- linux/ipc/sem.c Sun Sep 30 21:26:42 2001
+++ v2.5.1/linux/ipc/sem.c Sat Jan 5 17:24:52 2002
@@ -169,8 +169,7 @@
err = -EEXIST;
} else {
sma = sem_lock(id);
- if(sma==NULL)
- BUG();
+ BUG_ON(sma==NULL);
if (nsems > sma->sem_nsems)
err = -EINVAL;
else if (ipcperms(&sma->sem_perm, semflg))
@@ -935,8 +934,7 @@

tmp = sem_lock(semid);
if(tmp==NULL) {
- if(queue.prev != NULL)
- BUG();
+ BUG_ON(queue.prev != NULL);
current->semsleeping = NULL;
error = -EIDRM;
goto out_free;
@@ -1004,8 +1002,7 @@
current->semsleeping = NULL;

if (q->prev) {
- if(sma==NULL)
- BUG();
+ BUG_ON(sma==NULL);
remove_from_queue(q->sma,q);
}
if(sma!=NULL)
diff -u --recursive linux/ipc/shm.c v2.5.1/linux/ipc/shm.c
--- linux/ipc/shm.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/ipc/shm.c Sat Jan 5 17:26:10 2002
@@ -99,8 +99,7 @@
static inline void shm_inc (int id) {
struct shmid_kernel *shp;

- if(!(shp = shm_lock(id)))
- BUG();
+ BUG_ON(!(shp = shm_lock(id)));
shp->shm_atim = CURRENT_TIME;
shp->shm_lprid = current->pid;
shp->shm_nattch++;
@@ -143,8 +142,7 @@

down (&shm_ids.sem);
/* remove from the list of attaches of the shm segment */
- if(!(shp = shm_lock(id)))
- BUG();
+ BUG_ON(!(shp = shm_lock(id)));
shp->shm_lprid = current->pid;
shp->shm_dtim = CURRENT_TIME;
shp->shm_nattch--;
@@ -242,8 +240,7 @@
err = -EEXIST;
} else {
shp = shm_lock(id);
- if(shp==NULL)
- BUG();
+ BUG_ON(shp==NULL);
if (shp->shm_segsz < size)
err = -EINVAL;
else if (ipcperms(&shp->shm_perm, shmflg))
@@ -648,8 +645,7 @@
up_write(&current->mm->mmap_sem);

down (&shm_ids.sem);
- if(!(shp = shm_lock(shmid)))
- BUG();
+ BUG_ON(!(shp = shm_lock(shmid)));
shp->shm_nattch--;
if(shp->shm_nattch == 0 &&
shp->shm_flags & SHM_DEST)
diff -u --recursive linux/ipc/util.c v2.5.1/linux/ipc/util.c
--- linux/ipc/util.c Mon Aug 13 02:37:53 2001
+++ v2.5.1/linux/ipc/util.c Sat Jan 5 17:26:48 2002
@@ -185,12 +185,10 @@
{
struct kern_ipc_perm* p;
int lid = id % SEQ_MULTIPLIER;
- if(lid >= ids->size)
- BUG();
+ BUG_ON(lid >= ids->size);
p = ids->entries[lid].p;
ids->entries[lid].p = NULL;
- if(p==NULL)
- BUG();
+ BUG_ON(p==NULL);
ids->in_use--;

if (lid == ids->max_id) {
diff -u --recursive linux/kernel/device.c v2.5.1/linux/kernel/device.c
--- linux/kernel/device.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/kernel/device.c Sat Jan 5 17:15:48 2002
@@ -458,9 +458,9 @@
iobus->parent = NULL;
unlock_iobus(iobus);

- if (!list_empty(&iobus->devices) ||
- !list_empty(&iobus->children))
- BUG();
+ BUG_ON(!list_empty(&iobus->devices) ||
+ !list_empty(&iobus->children));
+

/* disavow parent's knowledge */
if (parent) {
diff -u --recursive linux/kernel/exit.c v2.5.1/linux/kernel/exit.c
--- linux/kernel/exit.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/kernel/exit.c Sat Jan 5 17:17:57 2002
@@ -318,7 +318,7 @@
mm_release();
if (mm) {
atomic_inc(&mm->mm_count);
- if (mm != tsk->active_mm) BUG();
+ BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
diff -u --recursive linux/kernel/fork.c v2.5.1/linux/kernel/fork.c
--- linux/kernel/fork.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/kernel/fork.c Sat Jan 5 17:18:22 2002
@@ -249,7 +249,7 @@
*/
inline void __mmdrop(struct mm_struct *mm)
{
- if (mm == &init_mm) BUG();
+ BUG_ON(mm == &init_mm);
pgd_free(mm->pgd);
destroy_context(mm);
free_mm(mm);
diff -u --recursive linux/kernel/pm.c v2.5.1/linux/kernel/pm.c
--- linux/kernel/pm.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/kernel/pm.c Sat Jan 5 17:19:22 2002
@@ -156,8 +156,7 @@
int status = 0;
int prev_state, next_state;

- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());

switch (rqst) {
case PM_SUSPEND:
diff -u --recursive linux/kernel/printk.c v2.5.1/linux/kernel/printk.c
--- linux/kernel/printk.c Sun Nov 11 19:20:21 2001
+++ v2.5.1/linux/kernel/printk.c Sat Jan 5 17:20:09 2002
@@ -330,8 +330,7 @@
unsigned long cur_index, start_print;
static int msg_level = -1;

- if (((long)(start - end)) > 0)
- BUG();
+ BUG_ON(((long)(start - end)) > 0);

cur_index = start;
start_print = start;
@@ -468,8 +467,7 @@
*/
void acquire_console_sem(void)
{
- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());
down(&console_sem);
console_may_schedule = 1;
}
diff -u --recursive linux/kernel/sched.c v2.5.1/linux/kernel/sched.c
--- linux/kernel/sched.c Sat Jan 5 17:05:11 2002
+++ v2.5.1/linux/kernel/sched.c Sat Jan 5 17:21:33 2002
@@ -565,7 +565,7 @@

spin_lock_prefetch(&runqueue_lock);

- if (!current->active_mm) BUG();
+ BUG_ON(!current->active_mm);
need_resched_back:
prev = current;
this_cpu = prev->processor;
@@ -682,12 +682,12 @@
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;
if (!mm) {
- if (next->active_mm) BUG();
+ BUG_ON(next->active_mm);
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next, this_cpu);
} else {
- if (next->active_mm != mm) BUG();
+ BUG_ON(next->active_mm != mm);
switch_mm(oldmm, mm, next, this_cpu);
}

diff -u --recursive linux/kernel/softirq.c v2.5.1/linux/kernel/softirq.c
--- linux/kernel/softirq.c Sat Dec 1 00:53:28 2001
+++ v2.5.1/linux/kernel/softirq.c Sat Jan 5 17:22:50 2002
@@ -190,8 +190,7 @@

if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
+ BUG_ON(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state));
t->func(t->data);
tasklet_unlock(t);
continue;
@@ -224,8 +223,7 @@

if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
+ BUG_ON(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state));
t->func(t->data);
tasklet_unlock(t);
continue;
diff -u --recursive linux/mm/bootmem.c v2.5.1/linux/mm/bootmem.c
--- linux/mm/bootmem.c Tue Nov 27 18:23:27 2001
+++ v2.5.1/linux/mm/bootmem.c Sun Jan 6 03:42:57 2002
@@ -82,18 +82,13 @@
PAGE_SIZE-1)/PAGE_SIZE;
unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;

- if (!size) BUG();
+ BUG_ON(!size);

- if (sidx < 0)
- BUG();
- if (eidx < 0)
- BUG();
- if (sidx >= eidx)
- BUG();
- if ((addr >> PAGE_SHIFT) >= bdata->node_low_pfn)
- BUG();
- if (end > bdata->node_low_pfn)
- BUG();
+ BUG_ON(sidx < 0);
+ BUG_ON(eidx < 0);
+ BUG_ON(sidx >= eidx);
+ BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
+ BUG_ON(end > bdata->node_low_pfn);
for (i = sidx; i < eidx; i++)
if (test_and_set_bit(i, bdata->node_bootmem_map))
printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
@@ -111,9 +106,8 @@
unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
unsigned long end = (addr + size)/PAGE_SIZE;

- if (!size) BUG();
- if (end > bdata->node_low_pfn)
- BUG();
+ BUG_ON(!size);
+ BUG_ON(end > bdata->node_low_pfn);

/*
* Round up the beginning of the address.
@@ -122,8 +116,7 @@
sidx = start - (bdata->node_boot_start/PAGE_SIZE);

for (i = sidx; i < eidx; i++) {
- if (!test_and_clear_bit(i, bdata->node_bootmem_map))
- BUG();
+ BUG_ON(!test_and_clear_bit(i, bdata->node_bootmem_map));
}
}

@@ -150,10 +143,9 @@
unsigned long eidx = bdata->node_low_pfn - (bdata->node_boot_start >>
PAGE_SHIFT);

- if (!size) BUG();
+ BUG_ON(!size);

- if (align & (align-1))
- BUG();
+ BUG_ON(align & (align-1));

/*
* We try to allocate bootmem pages above 'goal'
@@ -190,8 +182,7 @@
}
return NULL;
found:
- if (start >= eidx)
- BUG();
+ BUG_ON(start >= eidx);

/*
* Is the next page of the previous allocation-end the start
@@ -201,8 +192,8 @@
if (align <= PAGE_SIZE
&& bdata->last_offset && bdata->last_pos+1 == start) {
offset = (bdata->last_offset+align-1) & ~(align-1);
- if (offset > PAGE_SIZE)
- BUG();
+ BUG_ON(offset > PAGE_SIZE);
+
remaining_size = PAGE_SIZE-offset;
if (size < remaining_size) {
areasize = 0;
@@ -228,8 +219,8 @@
* Reserve the area now:
*/
for (i = start; i < start+areasize; i++)
- if (test_and_set_bit(i, bdata->node_bootmem_map))
- BUG();
+ BUG_ON(test_and_set_bit(i, bdata->node_bootmem_map));
+
memset(ret, 0, size);
return ret;
}
@@ -241,7 +232,7 @@
unsigned long i, count, total = 0;
unsigned long idx;

- if (!bdata->node_bootmem_map) BUG();
+ BUG_ON(!bdata->node_bootmem_map);

count = 0;
idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
diff -u --recursive linux/mm/filemap.c v2.5.1/linux/mm/filemap.c
--- linux/mm/filemap.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/filemap.c Sun Jan 6 03:54:24 2002
@@ -114,7 +114,7 @@
*/
void __remove_inode_page(struct page *page)
{
- if (PageDirty(page)) BUG();
+ BUG_ON(PageDirty(page));
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
}
@@ -647,8 +647,7 @@
*/
void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
{
- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));

page->index = index;
page_cache_get(page);
@@ -792,8 +791,8 @@
{
clear_bit(PG_launder, &(page)->flags);
smp_mb__before_clear_bit();
- if (!test_and_clear_bit(PG_locked, &(page)->flags))
- BUG();
+ BUG_ON(!test_and_clear_bit(PG_locked, &(page)->flags));
+
smp_mb__after_clear_bit();
if (waitqueue_active(&(page)->wait))
wake_up(&(page)->wait);
@@ -2095,8 +2094,8 @@

dir = pgd_offset(vma->vm_mm, address);
flush_cache_range(vma->vm_mm, end - size, end);
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
+
do {
error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
diff -u --recursive linux/mm/highmem.c v2.5.1/linux/mm/highmem.c
--- linux/mm/highmem.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/highmem.c Sun Jan 6 03:56:19 2002
@@ -72,8 +72,7 @@
pkmap_count[i] = 0;

/* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
+ BUG_ON(pte_none(pkmap_page_table[i]));

/*
* Don't need an atomic fetch-and-clear op here;
@@ -154,8 +153,8 @@
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
+
spin_unlock(&kmap_lock);
return (void*) vaddr;
}
@@ -168,8 +167,8 @@

spin_lock(&kmap_lock);
vaddr = (unsigned long) page->virtual;
- if (!vaddr)
- BUG();
+ BUG_ON(!vaddr);
+
nr = PKMAP_NR(vaddr);

/*
@@ -212,8 +211,8 @@
return 0;

page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
- if (!page_pool)
- BUG();
+ BUG_ON(!page_pool);
+
printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

return 0;
@@ -255,8 +254,8 @@
return 0;

isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
- if (!isa_page_pool)
- BUG();
+ BUG_ON(!isa_page_pool);
+

printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0;
diff -u --recursive linux/mm/memory.c v2.5.1/linux/mm/memory.c
--- linux/mm/memory.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/memory.c Sun Jan 6 03:58:53 2002
@@ -371,8 +371,8 @@
* even if kswapd happened to be looking at this
* process we _want_ it to get stuck.
*/
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
+
spin_lock(&mm->page_table_lock);
flush_cache_range(mm, address, end);
tlb = tlb_gather_mmu(mm);
@@ -520,7 +520,8 @@

pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
/* mapping 0 bytes is not permitted */
- if (!pgcount) BUG();
+ BUG_ON(!pgcount);
+
err = expand_kiobuf(iobuf, pgcount);
if (err)
return err;
@@ -767,8 +768,7 @@

dir = pgd_offset(mm, address);
flush_cache_range(mm, beg, end);
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);

spin_lock(&mm->page_table_lock);
do {
@@ -849,8 +849,7 @@
phys_addr -= from;
dir = pgd_offset(mm, from);
flush_cache_range(mm, beg, end);
- if (from >= end)
- BUG();
+ BUG_ON(from >= end);

spin_lock(&mm->page_table_lock);
do {
@@ -1434,10 +1433,10 @@

vma = find_vma(current->mm, addr);
write = (vma->vm_flags & VM_WRITE) != 0;
- if (addr >= end)
- BUG();
- if (end > vma->vm_end)
- BUG();
+ BUG_ON(addr >= end);
+
+ BUG_ON(end > vma->vm_end);
+
len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
ret = get_user_pages(current, current->mm, addr,
len, write, 0, NULL, NULL);
diff -u --recursive linux/mm/mempool.c v2.5.1/linux/mm/mempool.c
--- linux/mm/mempool.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/mempool.c Sun Jan 6 03:59:53 2002
@@ -98,8 +98,7 @@
unsigned long flags;
struct list_head *tmp;

- if (new_min_nr <= 0)
- BUG();
+ BUG_ON(new_min_nr <= 0);

spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr < pool->min_nr) {
@@ -109,8 +108,8 @@
*/
while (pool->curr_nr > pool->min_nr) {
tmp = pool->elements.next;
- if (tmp == &pool->elements)
- BUG();
+ BUG_ON(tmp == &pool->elements);
+
list_del(tmp);
element = tmp;
pool->curr_nr--;
@@ -164,8 +163,8 @@
pool->free(element, pool->pool_data);
pool->curr_nr--;
}
- if (pool->curr_nr)
- BUG();
+ BUG_ON(pool->curr_nr);
+
kfree(pool);
}

diff -u --recursive linux/mm/mmap.c v2.5.1/linux/mm/mmap.c
--- linux/mm/mmap.c Sun Nov 4 19:17:20 2001
+++ v2.5.1/linux/mm/mmap.c Sun Jan 6 04:01:49 2002
@@ -235,8 +235,8 @@
i = browse_rb(mm->mm_rb.rb_node);
if (i != mm->map_count)
printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
- if (bug)
- BUG();
+ BUG_ON(bug);
+
}
#else
#define validate_mm(mm) do { } while (0)
@@ -705,8 +705,8 @@
*pprev = NULL;
if (rb_prev)
*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
- if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
- BUG();
+ BUG_ON((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma);
+
return vma;
}
}
@@ -1136,8 +1136,7 @@
flush_tlb_mm(mm);

/* This is just debugging */
- if (mm->map_count)
- BUG();
+ BUG_ON(mm->map_count);

clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
}
@@ -1152,8 +1151,8 @@
rb_node_t ** rb_link, * rb_parent;

__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
- if (__vma && __vma->vm_start < vma->vm_end)
- BUG();
+ BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
validate_mm(mm);
@@ -1165,8 +1164,8 @@
rb_node_t ** rb_link, * rb_parent;

__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
- if (__vma && __vma->vm_start < vma->vm_end)
- BUG();
+ BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+
vma_link(mm, vma, prev, rb_link, rb_parent);
validate_mm(mm);
}
diff -u --recursive linux/mm/mprotect.c v2.5.1/linux/mm/mprotect.c
--- linux/mm/mprotect.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/mprotect.c Sun Jan 6 04:02:11 2002
@@ -79,8 +79,8 @@

dir = pgd_offset(current->mm, start);
flush_cache_range(current->mm, beg, end);
- if (start >= end)
- BUG();
+ BUG_ON(start >= end);
+
spin_lock(&current->mm->page_table_lock);
do {
change_pmd_range(dir, start, end - start, newprot);
diff -u --recursive linux/mm/mremap.c v2.5.1/linux/mm/mremap.c
--- linux/mm/mremap.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/mremap.c Sun Jan 6 04:02:31 2002
@@ -140,8 +140,8 @@
prev->vm_end = new_addr + new_len;
spin_unlock(&mm->page_table_lock);
new_vma = prev;
- if (next != prev->vm_next)
- BUG();
+ BUG_ON(next != prev->vm_next);
+
if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
spin_lock(&mm->page_table_lock);
prev->vm_end = next->vm_end;
diff -u --recursive linux/mm/page_alloc.c v2.5.1/linux/mm/page_alloc.c
--- linux/mm/page_alloc.c Tue Nov 20 01:35:40 2001
+++ v2.5.1/linux/mm/page_alloc.c Sun Jan 6 04:08:14 2002
@@ -70,20 +70,13 @@
struct page *base;
zone_t *zone;

- if (page->buffers)
- BUG();
- if (page->mapping)
- BUG();
- if (!VALID_PAGE(page))
- BUG();
- if (PageSwapCache(page))
- BUG();
- if (PageLocked(page))
- BUG();
- if (PageLRU(page))
- BUG();
- if (PageActive(page))
- BUG();
+ BUG_ON(page->buffers);
+ BUG_ON(page->mapping);
+ BUG_ON(!VALID_PAGE(page));
+ BUG_ON(PageSwapCache(page));
+ BUG_ON(PageLocked(page));
+ BUG_ON(PageLRU(page));
+ BUG_ON(PageActive(page));
page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));

if (current->flags & PF_FREE_PAGES)
@@ -95,8 +88,8 @@
mask = (~0UL) << order;
base = zone->zone_mem_map;
page_idx = page - base;
- if (page_idx & ~mask)
- BUG();
+ BUG_ON(page_idx & ~mask);
+
index = page_idx >> (1 + order);

area = zone->free_area + order;
@@ -108,8 +101,8 @@
while (mask + (1 << (MAX_ORDER-1))) {
struct page *buddy1, *buddy2;

- if (area >= zone->free_area + MAX_ORDER)
- BUG();
+ BUG_ON(area >= zone->free_area + MAX_ORDER);
+
if (!__test_and_change_bit(index, area->map))
/*
* the buddy page is still allocated.
@@ -120,10 +113,9 @@
*/
buddy1 = base + (page_idx ^ -mask);
buddy2 = base + page_idx;
- if (BAD_RANGE(zone,buddy1))
- BUG();
- if (BAD_RANGE(zone,buddy2))
- BUG();
+ BUG_ON(BAD_RANGE(zone,buddy1));
+
+ BUG_ON(BAD_RANGE(zone,buddy2));

memlist_del(&buddy1->list);
mask <<= 1;
@@ -156,8 +148,7 @@
unsigned long size = 1 << high;

while (high > low) {
- if (BAD_RANGE(zone,page))
- BUG();
+ BUG_ON(BAD_RANGE(zone,page));
area--;
high--;
size >>= 1;
@@ -166,8 +157,7 @@
index += size;
page += size;
}
- if (BAD_RANGE(zone,page))
- BUG();
+ BUG_ON(BAD_RANGE(zone,page));
return page;
}

@@ -189,8 +179,8 @@
unsigned int index;

page = memlist_entry(curr, struct page, list);
- if (BAD_RANGE(zone,page))
- BUG();
+ BUG_ON(BAD_RANGE(zone,page));
+
memlist_del(curr);
index = page - zone->zone_mem_map;
if (curr_order != MAX_ORDER-1)
@@ -201,12 +191,9 @@
spin_unlock_irqrestore(&zone->lock, flags);

set_page_count(page, 1);
- if (BAD_RANGE(zone,page))
- BUG();
- if (PageLRU(page))
- BUG();
- if (PageActive(page))
- BUG();
+ BUG_ON(BAD_RANGE(zone,page));
+ BUG_ON(PageLRU(page));
+ BUG_ON(PageActive(page));
return page;
}
curr_order++;
@@ -233,8 +220,7 @@

if (!(gfp_mask & __GFP_WAIT))
goto out;
- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());

current->allocation_order = order;
current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
@@ -261,22 +247,14 @@
set_page_count(tmp, 1);
page = tmp;

- if (page->buffers)
- BUG();
- if (page->mapping)
- BUG();
- if (!VALID_PAGE(page))
- BUG();
- if (PageSwapCache(page))
- BUG();
- if (PageLocked(page))
- BUG();
- if (PageLRU(page))
- BUG();
- if (PageActive(page))
- BUG();
- if (PageDirty(page))
- BUG();
+ BUG_ON(page->buffers);
+ BUG_ON(page->mapping);
+ BUG_ON(!VALID_PAGE(page));
+ BUG_ON(PageSwapCache(page));
+ BUG_ON(PageLocked(page));
+ BUG_ON(PageLRU(page));
+ BUG_ON(PageActive(page));
+ BUG_ON(PageDirty(page));

break;
}
@@ -289,8 +267,7 @@
list_del(entry);
tmp = list_entry(entry, struct page, list);
__free_pages_ok(tmp, tmp->index);
- if (!nr_pages--)
- BUG();
+ BUG_ON(!nr_pages--);
}
current->nr_local_pages = 0;
}
@@ -644,8 +621,8 @@
unsigned long totalpages, offset, realtotalpages;
const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);

- if (zone_start_paddr & ~PAGE_MASK)
- BUG();
+ BUG_ON(zone_start_paddr & ~PAGE_MASK);
+

totalpages = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
diff -u --recursive linux/mm/shmem.c v2.5.1/linux/mm/shmem.c
--- linux/mm/shmem.c Wed Nov 21 18:57:57 2001
+++ v2.5.1/linux/mm/shmem.c Sun Jan 6 04:10:13 2002
@@ -291,8 +291,7 @@
len = max+1;
} else {
max -= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
- if (max >= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2)
- BUG();
+ BUG_ON(max >= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2);

baseidx = max & ~(ENTRIES_PER_PAGE*ENTRIES_PER_PAGE-1);
base = (swp_entry_t ***) info->i_indirect + ENTRIES_PER_PAGE/2 + baseidx/ENTRIES_PER_PAGE/ENTRIES_PER_PAGE ;
@@ -426,8 +425,7 @@
unsigned long index;
struct inode *inode;

- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
if (!PageLaunder(page))
return fail_writepage(page);

@@ -444,11 +442,10 @@

spin_lock(&info->lock);
entry = shmem_swp_entry(info, index, 0);
- if (IS_ERR(entry)) /* this had been allocated on page allocation */
- BUG();
+ BUG_ON(IS_ERR(entry)); /* this had been allocated on page allocation */
+
shmem_recalc_inode(inode);
- if (entry->val)
- BUG();
+ BUG_ON(entry->val);

/* Remove it from the page cache */
remove_inode_page(page);
diff -u --recursive linux/mm/slab.c v2.5.1/linux/mm/slab.c
--- linux/mm/slab.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/slab.c Sun Jan 6 04:20:04 2002
@@ -423,8 +423,7 @@

kmem_cache_estimate(0, cache_cache.objsize, 0,
&left_over, &cache_cache.num);
- if (!cache_cache.num)
- BUG();
+ BUG_ON(!cache_cache.num);

cache_cache.colour = left_over/cache_cache.colour_off;
cache_cache.colour_next = 0;
@@ -465,8 +464,8 @@
sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!sizes->cs_dmacachep)
- BUG();
+ BUG_ON(!sizes->cs_dmacachep);
+
sizes++;
} while (sizes->cs_size);
}
@@ -565,11 +564,11 @@
void* objp = slabp->s_mem+cachep->objsize*i;
#if DEBUG
if (cachep->flags & SLAB_RED_ZONE) {
- if (*((unsigned long*)(objp)) != RED_MAGIC1)
- BUG();
- if (*((unsigned long*)(objp + cachep->objsize
- -BYTES_PER_WORD)) != RED_MAGIC1)
- BUG();
+ BUG_ON(*((unsigned long*)(objp)) != RED_MAGIC1);
+
+ BUG_ON(*((unsigned long*)(objp + cachep->objsize
+ -BYTES_PER_WORD)) != RED_MAGIC1);
+
objp += BYTES_PER_WORD;
}
#endif
@@ -579,9 +578,9 @@
if (cachep->flags & SLAB_RED_ZONE) {
objp -= BYTES_PER_WORD;
}
- if ((cachep->flags & SLAB_POISON) &&
- kmem_check_poison_obj(cachep, objp))
- BUG();
+ BUG_ON((cachep->flags & SLAB_POISON) &&
+ kmem_check_poison_obj(cachep, objp));
+
#endif
}
}
@@ -631,14 +630,14 @@
/*
* Sanity checks... these are all serious usage bugs.
*/
- if ((!name) ||
+ BUG_ON((!name) ||
((strlen(name) >= CACHE_NAMELEN - 1)) ||
in_interrupt() ||
(size < BYTES_PER_WORD) ||
(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
(dtor && !ctor) ||
- (offset < 0 || offset > size))
- BUG();
+ (offset < 0 || offset > size));
+

#if DEBUG
if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
@@ -668,8 +667,8 @@
* Always checks flags, a caller might be expecting debug
* support which isn't available.
*/
- if (flags & ~CREATE_MASK)
- BUG();
+ BUG_ON(flags & ~CREATE_MASK);
+

/* Get cache's description obj. */
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
@@ -813,8 +812,8 @@
kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);

/* The name field is constant - no lock needed. */
- if (!strcmp(pc->name, name))
- BUG();
+ BUG_ON(!strcmp(pc->name, name));
+
}
}

@@ -864,8 +863,8 @@
func(arg);
local_irq_enable();

- if (smp_call_function(func, arg, 1, 1))
- BUG();
+ BUG_ON(smp_call_function(func, arg, 1, 1));
+
}
typedef struct ccupdate_struct_s
{
@@ -932,8 +931,7 @@

slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);

@@ -955,8 +953,8 @@
*/
int kmem_cache_shrink(kmem_cache_t *cachep)
{
- if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
- BUG();
+ BUG_ON(!cachep || in_interrupt() || !is_chained_kmem_cache(cachep));
+

return __kmem_cache_shrink(cachep);
}
@@ -978,8 +976,7 @@
*/
int kmem_cache_destroy (kmem_cache_t * cachep)
{
- if (!cachep || in_interrupt() || cachep->growing)
- BUG();
+ BUG_ON(!cachep || in_interrupt() || cachep->growing);

/* Find the cache in the chain of caches. */
down(&cache_chain_sem);
@@ -1067,11 +1064,10 @@
/* need to poison the objs */
kmem_poison_obj(cachep, objp);
if (cachep->flags & SLAB_RED_ZONE) {
- if (*((unsigned long*)(objp)) != RED_MAGIC1)
- BUG();
- if (*((unsigned long*)(objp + cachep->objsize -
- BYTES_PER_WORD)) != RED_MAGIC1)
- BUG();
+ BUG_ON(*((unsigned long*)(objp)) != RED_MAGIC1);
+
+ BUG_ON(*((unsigned long*)(objp + cachep->objsize -
+ BYTES_PER_WORD)) != RED_MAGIC1);
}
#endif
slab_bufctl(slabp)[i] = i+1;
@@ -1097,8 +1093,7 @@
/* Be lazy and only check for valid flags here,
* keeping it out of the critical path in kmem_cache_alloc().
*/
- if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
- BUG();
+ BUG_ON(flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW));
if (flags & SLAB_NO_GROW)
return 0;

@@ -1108,8 +1103,7 @@
* in kmem_cache_alloc(). If a caller is seriously mis-behaving they
* will eventually be caught here (where it matters).
*/
- if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC)
- BUG();
+ BUG_ON(in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC);

ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & SLAB_LEVEL_MASK);
@@ -1196,15 +1190,13 @@
int i;
unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;

- if (objnr >= cachep->num)
- BUG();
- if (objp != slabp->s_mem + objnr*cachep->objsize)
- BUG();
+ BUG_ON(objnr >= cachep->num);
+
+ BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);

/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
- if (i == objnr)
- BUG();
+ BUG_ON(i == objnr);
}
return 0;
}
@@ -1213,11 +1205,9 @@
static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
{
if (flags & SLAB_DMA) {
- if (!(cachep->gfpflags & GFP_DMA))
- BUG();
+ BUG_ON(!(cachep->gfpflags & GFP_DMA));
} else {
- if (cachep->gfpflags & GFP_DMA)
- BUG();
+ BUG_ON(cachep->gfpflags & GFP_DMA);
}
}

@@ -1241,16 +1231,13 @@
}
#if DEBUG
if (cachep->flags & SLAB_POISON)
- if (kmem_check_poison_obj(cachep, objp))
- BUG();
+ BUG_ON(kmem_check_poison_obj(cachep, objp));
if (cachep->flags & SLAB_RED_ZONE) {
/* Set alloc red-zone, and check old one. */
- if (xchg((unsigned long *)objp, RED_MAGIC2) !=
- RED_MAGIC1)
- BUG();
- if (xchg((unsigned long *)(objp+cachep->objsize -
- BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
- BUG();
+ BUG_ON(xchg((unsigned long *)objp, RED_MAGIC2) !=
+ RED_MAGIC1);
+ BUG_ON(xchg((unsigned long *)(objp+cachep->objsize -
+ BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1);
objp += BYTES_PER_WORD;
}
#endif
@@ -1417,13 +1404,13 @@

if (cachep->flags & SLAB_RED_ZONE) {
objp -= BYTES_PER_WORD;
- if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+ BUG_ON(xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2);
/* Either write before start, or a double free. */
- BUG();
- if (xchg((unsigned long *)(objp+cachep->objsize -
- BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
- /* Either write past end, or a double free. */
- BUG();
+
+ BUG_ON(xchg((unsigned long *)(objp+cachep->objsize -
+ BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2);
+ /* Either write past end, or a double free. */
+
}
if (cachep->flags & SLAB_POISON)
kmem_poison_obj(cachep, objp);
@@ -1561,8 +1548,8 @@
unsigned long flags;
#if DEBUG
CHECK_PAGE(virt_to_page(objp));
- if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
- BUG();
+
+ BUG_ON(cachep != GET_PAGE_CACHE(virt_to_page(objp)));
#endif

local_irq_save(flags);
@@ -1760,8 +1747,8 @@
while (p != &searchp->slabs_free) {
slabp = list_entry(p, slab_t, list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
+
#endif
full_free++;
p = p->next;
@@ -1813,8 +1800,8 @@
break;
slabp = list_entry(p,slab_t,list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
+
#endif
list_del(&slabp->list);
STATS_INC_REAPED(best_cachep);
@@ -1885,22 +1872,22 @@
num_slabs = 0;
list_for_each(q,&cachep->slabs_full) {
slabp = list_entry(q, slab_t, list);
- if (slabp->inuse != cachep->num)
- BUG();
+ BUG_ON(slabp->inuse != cachep->num);
+
active_objs += cachep->num;
active_slabs++;
}
list_for_each(q,&cachep->slabs_partial) {
slabp = list_entry(q, slab_t, list);
- if (slabp->inuse == cachep->num || !slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse == cachep->num || !slabp->inuse);
+
active_objs += slabp->inuse;
active_slabs++;
}
list_for_each(q,&cachep->slabs_free) {
slabp = list_entry(q, slab_t, list);
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
+
num_slabs++;
}
num_slabs+=active_slabs;
diff -u --recursive linux/mm/swap_state.c v2.5.1/linux/mm/swap_state.c
--- linux/mm/swap_state.c Thu Nov 1 00:31:03 2001
+++ v2.5.1/linux/mm/swap_state.c Sun Jan 6 04:25:13 2002
@@ -69,8 +69,8 @@

int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
- if (page->mapping)
- BUG();
+ BUG_ON(page->mapping);
+
if (!swap_duplicate(entry)) {
INC_CACHE_INFO(noent_race);
return -ENOENT;
@@ -81,10 +81,9 @@
INC_CACHE_INFO(exist_race);
return -EEXIST;
}
- if (!PageLocked(page))
- BUG();
- if (!PageSwapCache(page))
- BUG();
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!PageSwapCache(page));
+
INC_CACHE_INFO(add_total);
return 0;
}
@@ -95,10 +94,9 @@
*/
void __delete_from_swap_cache(struct page *page)
{
- if (!PageLocked(page))
- BUG();
- if (!PageSwapCache(page))
- BUG();
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!PageSwapCache(page));
+
ClearPageDirty(page);
__remove_inode_page(page);
INC_CACHE_INFO(del_total);
@@ -114,8 +112,7 @@
{
swp_entry_t entry;

- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));

block_flushpage(page, 0);

diff -u --recursive linux/mm/swapfile.c v2.5.1/linux/mm/swapfile.c
--- linux/mm/swapfile.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/swapfile.c Sun Jan 6 04:23:43 2002
@@ -262,8 +262,8 @@
{
int retval = 0;

- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
+
switch (page_count(page)) {
case 3:
if (!page->buffers)
@@ -292,8 +292,8 @@
struct swap_info_struct * p;
swp_entry_t entry;

- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
+
if (!PageSwapCache(page))
return 0;
if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
@@ -428,8 +428,7 @@
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
do {
unuse_pmd(vma, pmd, address, end - address, offset, entry,
page);
@@ -444,8 +443,7 @@
{
unsigned long start = vma->vm_start, end = vma->vm_end;

- if (start >= end)
- BUG();
+ BUG_ON(start >= end);
do {
unuse_pgd(vma, pgdir, start, end - start, entry, page);
start = (start + PGDIR_SIZE) & PGDIR_MASK;
@@ -674,8 +672,8 @@
* Note shmem_unuse already deleted its from swap cache.
*/
swcount = *swap_map;
- if ((swcount > 0) != PageSwapCache(page))
- BUG();
+ BUG_ON((swcount > 0) != PageSwapCache(page));
+
if ((swcount > 1) && PageDirty(page)) {
rw_swap_page(WRITE, page);
lock_page(page);
diff -u --recursive linux/mm/vmscan.c v2.5.1/linux/mm/vmscan.c
--- linux/mm/vmscan.c Sat Jan 5 17:05:12 2002
+++ v2.5.1/linux/mm/vmscan.c Sun Jan 6 04:26:17 2002
@@ -235,8 +235,7 @@
pgdir = pgd_offset(mm, address);

end = vma->vm_end;
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
do {
count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
if (!count)