 
From: Matthew Wilcox <mawilcox@microsoft.com>
Subject: [PATCH v5 51/78] shmem: Comment fixups
Date: 15 Dec 2017

    Remove the last mentions of radix tree from various comments.
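
    The stale wording dates from when the page cache was a radix tree;
    these paths now go through the XArray. A minimal sketch of the lookup
    idiom behind the updated comments (illustrative only, not part of the
    patch; shmem_lookup_entry() is a hypothetical helper, and the names
    xa_load() and mapping->i_pages are the post-conversion mainline ones):

	#include <linux/fs.h>
	#include <linux/xarray.h>

	/* Hypothetical helper: look up a page cache slot by index.
	 * The old idiom was radix_tree_lookup(&mapping->page_tree, index).
	 * The slot may hold a page or a swap/shadow entry, which is why
	 * a swap entry can keep a slot reserved for a page without any
	 * further memory allocation. */
	static void *shmem_lookup_entry(struct address_space *mapping,
					pgoff_t index)
	{
		return xa_load(&mapping->i_pages, index);
	}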

    Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
    ---
    mm/shmem.c | 14 +++++++-------
    1 file changed, 7 insertions(+), 7 deletions(-)

    diff --git a/mm/shmem.c b/mm/shmem.c
    index ecf05645509b..3cfc247e4796 100644
    --- a/mm/shmem.c
    +++ b/mm/shmem.c
@@ -743,7 +743,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 }

 /*
- * Remove range of pages and swap entries from radix tree, and free them.
+ * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
@@ -1118,10 +1118,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 * We needed to drop mutex to make that restrictive page
 	 * allocation, but the inode might have been freed while we
 	 * dropped it: although a racing shmem_evict_inode() cannot
-	 * complete without emptying the radix_tree, our page lock
+	 * complete without emptying the page cache, our page lock
 	 * on this swapcache page is not enough to prevent that -
 	 * free_swap_and_cache() of our swap entry will only
-	 * trylock_page(), removing swap from radix_tree whatever.
+	 * trylock_page(), removing swap from page cache whatever.
 	 *
 	 * We must not proceed to shmem_add_to_page_cache() if the
 	 * inode has been freed, but of course we cannot rely on
@@ -1187,7 +1187,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 						false);
 	if (error)
 		goto out;
-	/* No radix_tree_preload: swap entry keeps a place for page in tree */
+	/* No memory allocation: swap entry occupies the slot for the page */
 	error = -EAGAIN;

 	mutex_lock(&shmem_swaplist_mutex);
@@ -1862,7 +1862,7 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 		spin_unlock_irq(&info->lock);
 		goto repeat;
 	}
-	if (error == -EEXIST)	/* from above or from radix_tree_insert */
+	if (error == -EEXIST)
 		goto repeat;
 	return error;
 }
@@ -2474,7 +2474,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }

 /*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ * llseek SEEK_DATA or SEEK_HOLE through the page cache.
  */
 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
 					pgoff_t index, pgoff_t end, int whence)
@@ -2562,7 +2562,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 }

 /*
- * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * We need a tag: a new tag would expand every xa_node by 8 bytes,
  * so reuse a tag which we firmly believe is never set or cleared on shmem.
  */
 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
    --
    2.15.1
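
    On the SHMEM_TAG_PINNED hunk above: tags (marks, in XArray terms) are
    per-slot bits stored inside every tree node, so defining a new one
    would expand every xa_node by 8 bytes (one more bitmap), while reusing
    a tag believed never set or cleared on shmem costs nothing. A minimal
    sketch of setting and testing such a mark (illustrative only; the
    helpers are hypothetical, and xa_set_mark()/xa_get_mark() are the
    mainline XArray names):

	#include <linux/fs.h>
	#include <linux/xarray.h>

	/* Hypothetical helpers: tag a page cache slot as pinned and test
	 * it later.  PAGECACHE_TAG_TOWRITE is one of the existing marks
	 * each xa_node already has room for, hence the reuse. */
	static void shmem_tag_pinned(struct xarray *pages, unsigned long index)
	{
		xa_set_mark(pages, index, PAGECACHE_TAG_TOWRITE);
	}

	static bool shmem_test_pinned(struct xarray *pages, unsigned long index)
	{
		return xa_get_mark(pages, index, PAGECACHE_TAG_TOWRITE);
	}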