Subject: Re: [PATCH 4/5] mm/swapfile: refcount block and queue before using blkcg_schedule_throttle()

On Wed, Apr 15, 2020 at 05:42:34AM +0000, Luis Chamberlain wrote:
> > I don't understand the atomic part of the comment. How does
> > bdgrab/bdput help us there?
>
> The commit log above did a better job at explaining this in terms of our
> goal to use the request_queue and how this use would prevent the risk of
> releasing the request_queue, which could sleep.

So bdput() eventually does an iput(), but what leads to an out-of-context
offload?
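
For reference, bdput() at this point is essentially just a thin wrapper
that drops the block_device inode reference; paraphrasing fs/block_dev.c
(not quoting it verbatim):

void bdput(struct block_device *bdev)
{
	/* drop the inode reference taken by bdget()/bdgrab() */
	iput(bdev->bd_inode);
}

so the question above boils down to whether that eventual iput() can end
up doing work that is not safe in the caller's context.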

But anyway, isn't the original problem better solved by simply not
releasing the queue from atomic context to start with? There isn't
really any good reason to keep holding the spinlock once we have a
reference on the queue, so something like this (not even compile tested)
should do the job:

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c5dc833212e1..45faa851f789 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1673,6 +1673,17 @@ void blkcg_maybe_throttle_current(void)
 	blk_put_queue(q);
 }
 
+/* consumes a reference on q */
+void __blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+{
+	if (current->throttle_queue)
+		blk_put_queue(current->throttle_queue);
+	current->throttle_queue = q;
+	if (use_memdelay)
+		current->use_memdelay = use_memdelay;
+	set_notify_resume(current);
+}
+
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
  * @q: the request queue IO was submitted on
@@ -1694,16 +1705,8 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 {
 	if (unlikely(current->flags & PF_KTHREAD))
 		return;
-
-	if (!blk_get_queue(q))
-		return;
-
-	if (current->throttle_queue)
-		blk_put_queue(current->throttle_queue);
-	current->throttle_queue = q;
-	if (use_memdelay)
-		current->use_memdelay = use_memdelay;
-	set_notify_resume(current);
+	if (blk_get_queue(q))
+		__blkcg_schedule_throttle(q, use_memdelay);
 }
 
 /**
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 35f8ffe92b70..68440cb3ea9e 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -679,6 +679,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void __blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
 #else /* CONFIG_BLK_CGROUP */

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5871a2aa86a5..4c6aa59ee593 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3749,9 +3749,10 @@ void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
 				  gfp_t gfp_mask)
 {
 	struct swap_info_struct *si, *next;
+	struct request_queue *q = NULL;
+
 	if (!(gfp_mask & __GFP_IO) || !memcg)
 		return;
-
 	if (!blk_cgroup_congested())
 		return;

@@ -3761,17 +3762,21 @@ void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
 	 */
 	if (current->throttle_queue)
 		return;
+	if (unlikely(current->flags & PF_KTHREAD))
+		return;
 
 	spin_lock(&swap_avail_lock);
 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
 				  avail_lists[node]) {
 		if (si->bdev) {
-			blkcg_schedule_throttle(bdev_get_queue(si->bdev),
-						true);
+			if (blk_get_queue(bdev_get_queue(si->bdev)))
+				q = bdev_get_queue(si->bdev);
 			break;
 		}
 	}
 	spin_unlock(&swap_avail_lock);
+	if (q)
+		__blkcg_schedule_throttle(q, true);
 }
 #endif
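
To make the locking rule concrete, here is a minimal, self-contained
userspace sketch of the same pattern the patch follows: take the
reference while holding the spinlock, drop the lock, and only then do
the part that may block (releasing the previously stored reference).
The names (queue_get, queue_put, schedule_throttle, throttle_swaprate)
and the pthread spinlock are made up for illustration only; they are
not the kernel APIs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	atomic_int refcount;
};

static pthread_spinlock_t avail_lock;
static struct queue *avail_queue;		/* protected by avail_lock */
static struct queue *current_throttle_queue;	/* per-task state */

static struct queue *queue_get(struct queue *q)
{
	atomic_fetch_add(&q->refcount, 1);
	return q;
}

static void queue_put(struct queue *q)
{
	/* in the kernel case the final put may block, which is why it
	 * must never run under avail_lock; here we just free */
	if (atomic_fetch_sub(&q->refcount, 1) == 1)
		free(q);
}

/* consumes the reference on q, mirroring __blkcg_schedule_throttle() */
static void schedule_throttle(struct queue *q)
{
	if (current_throttle_queue)
		queue_put(current_throttle_queue);
	current_throttle_queue = q;
}

static void throttle_swaprate(void)
{
	struct queue *q = NULL;

	pthread_spin_lock(&avail_lock);
	if (avail_queue)
		q = queue_get(avail_queue);	/* cheap, non-blocking */
	pthread_spin_unlock(&avail_lock);

	if (q)
		schedule_throttle(q);		/* lock no longer held */
}

int main(void)
{
	pthread_spin_init(&avail_lock, PTHREAD_PROCESS_PRIVATE);

	avail_queue = calloc(1, sizeof(*avail_queue));
	atomic_init(&avail_queue->refcount, 1);

	throttle_swaprate();
	printf("throttle queue stored: %p\n", (void *)current_throttle_queue);
	return 0;
}

The only work done under avail_lock is the reference grab; everything
that may block happens after the unlock, which is exactly what the
mm/swapfile.c hunk above does with blk_get_queue() and
__blkcg_schedule_throttle().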
