Subject: [patch 09/28] mm: page_mkwrite change prototype to match fault

    2.6.27-stable review patch.  If anyone has any objections, please let us know.

    ------------------

    From: Nick Piggin <npiggin@suse.de>

    commit c2ec175c39f62949438354f603f4aa170846aabb upstream


    mm: page_mkwrite change prototype to match fault

    Change the page_mkwrite prototype to take a struct vm_fault, and return
    VM_FAULT_xxx flags. There should be no functional change.

    This makes it possible to return much more detailed error information to
    the VM (and can also provide more information, e.g. the virtual_address,
    to the driver, which might be important in some special cases).
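
    For reference, a page_mkwrite() implementation now receives its arguments
    through the same struct vm_fault used by ->fault(); the fields below follow
    the 2.6.27-era definition in include/linux/mm.h (comments paraphrased), with
    vmf->page holding the page that is about to be made writable:

	struct vm_fault {
		unsigned int flags;		/* FAULT_FLAG_xxx flags */
		pgoff_t pgoff;			/* logical page offset based on vma */
		void __user *virtual_address;	/* faulting virtual address */
		struct page *page;		/* page being made writable */
	};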

    This is required for a subsequent fix, and will also make it easier to
    merge page_mkwrite() with fault() in the future.
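
    The conversion pattern that the hunks below repeat for each filesystem can
    be sketched with a hypothetical handler (example_page_mkwrite and
    example_prepare_write are illustrative names, not part of this patch): the
    old handler took the page directly and returned 0 or -errno, the new one
    takes the vm_fault, pulls the page out of it, and maps errors to
    VM_FAULT_SIGBUS:

	/* Before: page passed directly, 0 or -errno returned. */
	static int example_page_mkwrite_old(struct vm_area_struct *vma,
					    struct page *page)
	{
		return example_prepare_write(vma->vm_file, page);
	}

	/* After: struct vm_fault passed in, VM_FAULT_xxx flags returned. */
	static int example_page_mkwrite(struct vm_area_struct *vma,
					struct vm_fault *vmf)
	{
		struct page *page = vmf->page;
		int err;

		err = example_prepare_write(vma->vm_file, page);
		if (err)
			return VM_FAULT_SIGBUS;	/* -errno is no longer understood */
		return 0;
	}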

    Signed-off-by: Nick Piggin <npiggin@suse.de>
    Cc: Chris Mason <chris.mason@oracle.com>
    Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
    Cc: Miklos Szeredi <miklos@szeredi.hu>
    Cc: Steven Whitehouse <swhiteho@redhat.com>
    Cc: Mark Fasheh <mfasheh@suse.com>
    Cc: Joel Becker <joel.becker@oracle.com>
    Cc: Artem Bityutskiy <dedekind@infradead.org>
    Cc: Felix Blyakher <felixb@sgi.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

    ---
     Documentation/filesystems/Locking |    2 +-
     drivers/video/fb_defio.c          |    3 ++-
     fs/buffer.c                       |    6 +++++-
     fs/ext4/ext4.h                    |    2 +-
     fs/ext4/inode.c                   |    5 ++++-
     fs/fuse/file.c                    |    3 ++-
     fs/gfs2/ops_file.c                |    5 ++++-
     fs/nfs/file.c                     |    5 ++++-
     fs/ocfs2/mmap.c                   |    6 ++++--
     fs/ubifs/file.c                   |    9 ++++++---
     fs/xfs/linux-2.6/xfs_file.c       |    4 ++--
     include/linux/buffer_head.h       |    2 +-
     include/linux/mm.h                |    3 ++-
     mm/memory.c                       |   26 ++++++++++++++++++++++----
     14 files changed, 60 insertions(+), 21 deletions(-)

    --- a/Documentation/filesystems/Locking
    +++ b/Documentation/filesystems/Locking
    @@ -502,7 +502,7 @@ prototypes:
    void (*open)(struct vm_area_struct*);
    void (*close)(struct vm_area_struct*);
    int (*fault)(struct vm_area_struct*, struct vm_fault *);
    - int (*page_mkwrite)(struct vm_area_struct *, struct page *);
    + int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
    int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

    locking rules:
    --- a/drivers/video/fb_defio.c
    +++ b/drivers/video/fb_defio.c
    @@ -70,8 +70,9 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

    /* vm_ops->page_mkwrite handler */
    static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
    - struct page *page)
    + struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    struct fb_info *info = vma->vm_private_data;
    struct fb_deferred_io *fbdefio = info->fbdefio;
    struct page *cur;
    --- a/fs/buffer.c
    +++ b/fs/buffer.c
    @@ -2402,9 +2402,10 @@ int block_commit_write(struct page *page
    * unlock the page.
    */
    int
    -block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
    +block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
    get_block_t get_block)
    {
    + struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    unsigned long end;
    loff_t size;
    @@ -2429,6 +2430,9 @@ block_page_mkwrite(struct vm_area_struct
    ret = block_commit_write(page, 0, end);

    out_unlock:
    + if (ret)
    + ret = VM_FAULT_SIGBUS;
    +
    unlock_page(page);
    return ret;
    }
    --- a/fs/ext4/ext4.h
    +++ b/fs/ext4/ext4.h
    @@ -1084,7 +1084,7 @@ extern int ext4_meta_trans_blocks(struct
    extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
    extern int ext4_block_truncate_page(handle_t *handle,
    struct address_space *mapping, loff_t from);
    -extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
    +extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

    /* ioctl.c */
    extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
    --- a/fs/ext4/inode.c
    +++ b/fs/ext4/inode.c
    @@ -4861,8 +4861,9 @@ static int ext4_bh_unmapped(handle_t *ha
    return !buffer_mapped(bh);
    }

    -int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    loff_t size;
    unsigned long len;
    int ret = -EINVAL;
    @@ -4913,6 +4914,8 @@ int ext4_page_mkwrite(struct vm_area_str
    goto out_unlock;
    ret = 0;
    out_unlock:
    + if (ret)
    + ret = VM_FAULT_SIGBUS;
    up_read(&inode->i_alloc_sem);
    return ret;
    }
    --- a/fs/fuse/file.c
    +++ b/fs/fuse/file.c
    @@ -1219,8 +1219,9 @@ static void fuse_vma_close(struct vm_are
    * - sync(2)
    * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
    */
    -static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    /*
    * Don't use page->mapping as it may become NULL from a
    * concurrent truncate.
    --- a/fs/gfs2/ops_file.c
    +++ b/fs/gfs2/ops_file.c
    @@ -338,8 +338,9 @@ static int gfs2_allocate_page_backing(st
    * blocks allocated on disk to back that page.
    */

    -static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    @@ -411,6 +412,8 @@ out_unlock:
    gfs2_glock_dq(&gh);
    out:
    gfs2_holder_uninit(&gh);
    + if (ret)
    + ret = VM_FAULT_SIGBUS;
    return ret;
    }

    --- a/fs/nfs/file.c
    +++ b/fs/nfs/file.c
    @@ -448,8 +448,9 @@ const struct address_space_operations nf
    .launder_page = nfs_launder_page,
    };

    -static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    struct file *filp = vma->vm_file;
    struct dentry *dentry = filp->f_path.dentry;
    unsigned pagelen;
    @@ -480,6 +481,8 @@ static int nfs_vm_page_mkwrite(struct vm
    ret = pagelen;
    out_unlock:
    unlock_page(page);
    + if (ret)
    + ret = VM_FAULT_SIGBUS;
    return ret;
    }

    --- a/fs/ocfs2/mmap.c
    +++ b/fs/ocfs2/mmap.c
    @@ -150,8 +150,9 @@ out:
    return ret;
    }

    -static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    struct buffer_head *di_bh = NULL;
    sigset_t blocked, oldset;
    @@ -192,7 +193,8 @@ out:
    ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
    if (ret2 < 0)
    mlog_errno(ret2);
    -
    + if (ret)
    + ret = VM_FAULT_SIGBUS;
    return ret;
    }

    --- a/fs/ubifs/file.c
    +++ b/fs/ubifs/file.c
    @@ -1140,8 +1140,9 @@ static int ubifs_releasepage(struct page
    * mmap()d file has taken write protection fault and is being made
    * writable. UBIFS must ensure page is budgeted for.
    */
    -static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    +static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    + struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    struct ubifs_info *c = inode->i_sb->s_fs_info;
    struct timespec now = ubifs_current_time(inode);
    @@ -1153,7 +1154,7 @@ static int ubifs_vm_page_mkwrite(struct
    ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));

    if (unlikely(c->ro_media))
    - return -EROFS;
    + return VM_FAULT_SIGBUS; /* -EROFS */

    /*
    * We have not locked @page so far so we may budget for changing the
    @@ -1186,7 +1187,7 @@ static int ubifs_vm_page_mkwrite(struct
    if (err == -ENOSPC)
    ubifs_warn("out of space for mmapped file "
    "(inode number %lu)", inode->i_ino);
    - return err;
    + return VM_FAULT_SIGBUS;
    }

    lock_page(page);
    @@ -1226,6 +1227,8 @@ static int ubifs_vm_page_mkwrite(struct
    out_unlock:
    unlock_page(page);
    ubifs_release_budget(c, &req);
    + if (err)
    + err = VM_FAULT_SIGBUS;
    return err;
    }

    --- a/fs/xfs/linux-2.6/xfs_file.c
    +++ b/fs/xfs/linux-2.6/xfs_file.c
    @@ -427,9 +427,9 @@ xfs_file_ioctl_invis(
    STATIC int
    xfs_vm_page_mkwrite(
    struct vm_area_struct *vma,
    - struct page *page)
    + struct vm_fault *vmf)
    {
    - return block_page_mkwrite(vma, page, xfs_get_blocks);
    + return block_page_mkwrite(vma, vmf, xfs_get_blocks);
    }

    const struct file_operations xfs_file_operations = {
    --- a/include/linux/buffer_head.h
    +++ b/include/linux/buffer_head.h
    @@ -222,7 +222,7 @@ int cont_write_begin(struct file *, stru
    get_block_t *, loff_t *);
    int generic_cont_expand_simple(struct inode *inode, loff_t size);
    int block_commit_write(struct page *page, unsigned from, unsigned to);
    -int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
    +int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
    get_block_t get_block);
    void block_sync_page(struct page *);
    sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
    @@ -138,6 +138,7 @@ extern pgprot_t protection_map[16];

    #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
    #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
    +#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */


    /*
    @@ -173,7 +174,7 @@ struct vm_operations_struct {

    /* notification that a previously read-only page is about to become
    * writable, if an error is returned it will cause a SIGBUS */
    - int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
    + int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

    /* called by access_process_vm when get_user_pages() fails, typically
    * for use by special VMAs that can switch between memory and hardware
    --- a/mm/memory.c
    +++ b/mm/memory.c
    @@ -1801,6 +1801,15 @@ static int do_wp_page(struct mm_struct *
    * get_user_pages(.write=1, .force=1).
    */
    if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
    + struct vm_fault vmf;
    + int tmp;
    +
    + vmf.virtual_address = (void __user *)(address &
    + PAGE_MASK);
    + vmf.pgoff = old_page->index;
    + vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
    + vmf.page = old_page;
    +
    /*
    * Notify the address space that the page is about to
    * become writable so that it can prohibit this or wait
    @@ -1812,8 +1821,12 @@ static int do_wp_page(struct mm_struct *
    page_cache_get(old_page);
    pte_unmap_unlock(page_table, ptl);

    - if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
    + tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
    + if (unlikely(tmp &
    + (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
    + ret = tmp;
    goto unwritable_page;
    + }

    /*
    * Since we dropped the lock we need to revalidate
    @@ -1955,7 +1968,7 @@ oom:

    unwritable_page:
    page_cache_release(old_page);
    - return VM_FAULT_SIGBUS;
    + return ret;
    }

    /*
    @@ -2472,9 +2485,14 @@ static int __do_fault(struct mm_struct *
    * to become writable
    */
    if (vma->vm_ops->page_mkwrite) {
    + int tmp;
    +
    unlock_page(page);
    - if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
    - ret = VM_FAULT_SIGBUS;
    + vmf.flags |= FAULT_FLAG_MKWRITE;
    + tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
    + if (unlikely(tmp &
    + (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
    + ret = tmp;
    anon = 1; /* no anon but release vmf.page */
    goto out_unlocked;
    }


