Subject: [RFC PATCH 13/13] scsi: scsi_dh: ufshpb: Add "Cold" subregions timer
In order not to hang on to “cold” subregions, we inactivate any subregion
that sees no READ access for a predefined amount of time. To that end, a
timer is attached to each active subregion and re-armed on every READ, so
that it expires 500 msec after the last READ. On expiry, we note the event
and queue the state-machine worker to handle it.

Timers will not be attached to a pinned region's subregions.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
drivers/scsi/device_handler/scsi_dh_ufshpb.c | 47 +++++++++++++++++++++++++++-
1 file changed, 46 insertions(+), 1 deletion(-)
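
Note for reviewers: below is a minimal, self-contained sketch of the
rearming-timer pattern this patch uses. All names here (demo_subregion,
demo_read_timeout, DEMO_READ_TIMEOUT_MSEC, demo_*) are hypothetical; the
actual patch hangs the timer off struct ufshpb_subregion, counts down
read_timeout_expiries before giving up, and defers the inactivation
decision to the state-machine worker.

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define DEMO_READ_TIMEOUT_MSEC	500

struct demo_subregion {
	struct timer_list read_timer;
	struct work_struct work;
	unsigned long event;
};

/* timer callback: runs only if no READ re-armed the timer for 500 msec */
static void demo_read_timeout(struct timer_list *t)
{
	/* recover the containing subregion from its embedded timer */
	struct demo_subregion *s = from_timer(s, t, read_timer);

	/* record the expiry and let the worker decide to inactivate */
	set_bit(0, &s->event);
	schedule_work(&s->work);
}

/* on activation: set up the timer and start the 500 msec window */
static void demo_subregion_activate(struct demo_subregion *s)
{
	timer_setup(&s->read_timer, demo_read_timeout, 0);
	mod_timer(&s->read_timer,
		  jiffies + msecs_to_jiffies(DEMO_READ_TIMEOUT_MSEC));
}

/* on every READ hitting the subregion: push the deadline out again */
static void demo_subregion_read(struct demo_subregion *s)
{
	mod_timer(&s->read_timer,
		  jiffies + msecs_to_jiffies(DEMO_READ_TIMEOUT_MSEC));
}

/* on inactivation/teardown: stop the timer before the subregion goes away */
static void demo_subregion_inactivate(struct demo_subregion *s)
{
	del_timer_sync(&s->read_timer);
}

Because mod_timer() both arms an inactive timer and re-arms a pending one,
the READ path needs no extra state to know whether the timer is already
running.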

diff --git a/drivers/scsi/device_handler/scsi_dh_ufshpb.c b/drivers/scsi/device_handler/scsi_dh_ufshpb.c
index 04e3d56..e89dd30 100644
--- a/drivers/scsi/device_handler/scsi_dh_ufshpb.c
+++ b/drivers/scsi/device_handler/scsi_dh_ufshpb.c
@@ -123,6 +123,8 @@ struct ufshpb_subregion {
 	unsigned long event;
 	struct mutex state_lock;
 	struct work_struct hpb_work;
+
+	struct timer_list read_timer;
 };
 
 /**
@@ -210,6 +212,31 @@ static unsigned int entries_per_region_mask;
 static unsigned int entries_per_subregion_shift;
 static unsigned int entries_per_subregion_mask;
 
+static void ufshpb_read_timeout(struct timer_list *t)
+{
+	struct ufshpb_subregion *s = from_timer(s, t, read_timer);
+	enum ufshpb_state s_state;
+
+	rcu_read_lock();
+	s_state = s->state;
+	rcu_read_unlock();
+
+	if (WARN_ON_ONCE(s_state != HPB_STATE_ACTIVE))
+		return;
+
+	if (atomic64_read(&s->writes) < SET_AS_DIRTY &&
+	    !atomic_dec_and_test(&s->read_timeout_expiries)) {
+		/* rewind the timer for clean subregions */
+		mod_timer(&s->read_timer,
+			  jiffies + msecs_to_jiffies(READ_TIMEOUT_MSEC));
+		return;
+	}
+
+	set_bit(SUBREGION_EVENT_TIMER, &s->event);
+
+	queue_work(s->hpb->wq, &s->hpb_work);
+}
+
 static inline unsigned int ufshpb_lba_to_region(u64 lba)
 {
 	return lba >> entries_per_region_shift;
@@ -376,6 +403,7 @@ static bool ufshpb_test_block_dirty(struct ufshpb_dh_data *h,
 	struct ufshpb_region *r = hpb->region_tbl + region;
 	struct ufshpb_subregion *s = r->subregion_tbl + subregion;
 	enum ufshpb_state s_state;
+	bool is_dirty;
 
 	__update_rw_counters(hpb, start_lba, end_lba, REQ_OP_READ);
 
@@ -386,7 +414,14 @@ static bool ufshpb_test_block_dirty(struct ufshpb_dh_data *h,
 	if (s_state != HPB_STATE_ACTIVE)
 		return true;
 
-	return (atomic64_read(&s->writes) >= SET_AS_DIRTY);
+	is_dirty = (atomic64_read(&s->writes) >= SET_AS_DIRTY);
+	if (!is_dirty && !test_bit(region, hpb->pinned_map)) {
+		mod_timer(&s->read_timer,
+			  jiffies + msecs_to_jiffies(READ_TIMEOUT_MSEC));
+		atomic_set(&s->read_timeout_expiries, READ_TIMEOUT_EXPIRIES);
+	}
+
+	return is_dirty;
 }
 
 /* Call this on write from prep_fn */
@@ -456,6 +491,8 @@ static void __subregion_inactivate(struct ufshpb_dh_lun *hpb,
 	list_add(&s->mctx->list, &hpb->lh_map_ctx);
 	spin_unlock(&hpb->map_list_lock);
 	s->mctx = NULL;
+
+	del_timer(&s->read_timer);
 }
 
 static void ufshpb_subregion_inactivate(struct ufshpb_dh_lun *hpb,
@@ -602,6 +639,13 @@ static int __subregion_activate(struct ufshpb_dh_lun *hpb,
 	atomic64_sub(atomic64_read(&s->writes), &r->writes);
 	atomic64_set(&s->writes, 0);
 
+	if (!test_bit(r->region, hpb->pinned_map)) {
+		timer_setup(&s->read_timer, ufshpb_read_timeout, 0);
+		mod_timer(&s->read_timer,
+			  jiffies + msecs_to_jiffies(READ_TIMEOUT_MSEC));
+		atomic_set(&s->read_timeout_expiries, READ_TIMEOUT_EXPIRIES);
+	}
+
 out:
 	if (ret)
 		__subregion_inactivate(hpb, r, s);
@@ -1377,6 +1421,7 @@ static void ufshpb_region_tbl_remove(struct ufshpb_dh_lun *hpb)
 					r->subregion_tbl + j;
 
 			cancel_work_sync(&s->hpb_work);
+			del_timer(&s->read_timer);
 		}
 
 		kfree(r->subregion_tbl);
--
2.7.4