scsi: ufs: core: Introduce a new clock_scaling lock
author     Avri Altman <avri.altman@wdc.com>
           Sun, 24 Nov 2024 07:08:08 +0000 (09:08 +0200)
committer  Martin K. Petersen <martin.petersen@oracle.com>
           Wed, 4 Dec 2024 18:12:07 +0000 (13:13 -0500)
Introduce a new clock scaling lock to serialize access to some of the clock
scaling members instead of the host_lock. While at it, simplify the affected
code with the guard() and scoped_guard() cleanup macros.

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Link: https://lore.kernel.org/r/20241124070808.194860-5-avri.altman@wdc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufshcd.c
include/ufs/ufshcd.h
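
For context, the conversion below relies on the scope-based cleanup helpers
from <linux/cleanup.h>: guard() holds a lock until the enclosing scope ends,
while scoped_guard() holds it only for the attached braced block, releasing
it automatically on every exit path. The following is a minimal sketch of
that pattern; the names demo_scaling, demo_try_suspend and demo_resume are
invented for illustration and are not part of the patch.

/*
 * Illustrative sketch only -- not driver code. It mirrors the locking
 * pattern adopted by the hunks below.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_scaling {
	spinlock_t lock;	/* serializes the fields below */
	int active_reqs;
	bool is_suspended;
};

/* guard(): the lock is held until the function returns, on any path. */
static bool demo_try_suspend(struct demo_scaling *s)
{
	guard(spinlock_irqsave)(&s->lock);

	if (s->active_reqs || s->is_suspended)
		return false;

	s->is_suspended = true;
	return true;
}

/* scoped_guard(): the lock is held only for the braced scope. */
static void demo_resume(struct demo_scaling *s)
{
	scoped_guard(spinlock_irqsave, &s->lock) {
		if (!s->is_suspended)
			return;
		s->is_suspended = false;
	}

	/* runs unlocked, like devfreq_resume_device() in the patch */
}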

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9288575f94a2abdea88375848c2f3e57031120df..bd2cc2fdb5ba02af259a845138ecc0bb549e00d4 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1452,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.suspend_work);
-       unsigned long irq_flags;
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (hba->clk_scaling.active_reqs ||
+                   hba->clk_scaling.is_suspended)
+                       return;
+
+               hba->clk_scaling.is_suspended = true;
+               hba->clk_scaling.window_start_t = 0;
        }
-       hba->clk_scaling.is_suspended = true;
-       hba->clk_scaling.window_start_t = 0;
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        devfreq_suspend_device(hba->devfreq);
 }
@@ -1470,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.resume_work);
-       unsigned long irq_flags;
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (!hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (!hba->clk_scaling.is_suspended)
+                       return;
+               hba->clk_scaling.is_suspended = false;
        }
-       hba->clk_scaling.is_suspended = false;
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        devfreq_resume_device(hba->devfreq);
 }
@@ -1492,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
        bool scale_up = false, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
-       unsigned long irq_flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
@@ -1513,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
                *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
        }
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (ufshcd_eh_in_progress(hba)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return 0;
-       }
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (ufshcd_eh_in_progress(hba))
+                       return 0;
 
-       /* Skip scaling clock when clock scaling is suspended */
-       if (hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               dev_warn(hba->dev, "clock scaling is suspended, skip");
-               return 0;
-       }
+               /* Skip scaling clock when clock scaling is suspended */
+               if (hba->clk_scaling.is_suspended) {
+                       dev_warn(hba->dev, "clock scaling is suspended, skip");
+                       return 0;
+               }
 
-       if (!hba->clk_scaling.active_reqs)
-               sched_clk_scaling_suspend_work = true;
+               if (!hba->clk_scaling.active_reqs)
+                       sched_clk_scaling_suspend_work = true;
 
-       if (list_empty(clk_list)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               goto out;
-       }
+               if (list_empty(clk_list))
+                       goto out;
 
-       /* Decide based on the target or rounded-off frequency and update */
-       if (hba->use_pm_opp)
-               scale_up = *freq > hba->clk_scaling.target_freq;
-       else
-               scale_up = *freq == clki->max_freq;
+               /* Decide based on the target or rounded-off frequency and update */
+               if (hba->use_pm_opp)
+                       scale_up = *freq > hba->clk_scaling.target_freq;
+               else
+                       scale_up = *freq == clki->max_freq;
 
-       if (!hba->use_pm_opp && !scale_up)
-               *freq = clki->min_freq;
+               if (!hba->use_pm_opp && !scale_up)
+                       *freq = clki->min_freq;
 
-       /* Update the frequency */
-       if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               ret = 0;
-               goto out; /* no state change required */
+               /* Update the frequency */
+               if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+                       ret = 0;
+                       goto out; /* no state change required */
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        start = ktime_get();
        ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1574,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-       unsigned long flags;
        ktime_t curr_t;
 
        if (!ufshcd_is_clkscaling_supported(hba))
@@ -1582,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 
        memset(stat, 0, sizeof(*stat));
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        curr_t = ktime_get();
        if (!scaling->window_start_t)
                goto start_window;
@@ -1618,7 +1610,7 @@ start_window:
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
        return 0;
 }
 
@@ -1682,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
 
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
-       unsigned long flags;
        bool suspend = false;
 
        cancel_work_sync(&hba->clk_scaling.suspend_work);
        cancel_work_sync(&hba->clk_scaling.resume_work);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (!hba->clk_scaling.is_suspended) {
-               suspend = true;
-               hba->clk_scaling.is_suspended = true;
-               hba->clk_scaling.window_start_t = 0;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (!hba->clk_scaling.is_suspended) {
+                       suspend = true;
+                       hba->clk_scaling.is_suspended = true;
+                       hba->clk_scaling.window_start_t = 0;
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (suspend)
                devfreq_suspend_device(hba->devfreq);
@@ -1702,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 {
-       unsigned long flags;
        bool resume = false;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_scaling.is_suspended) {
-               resume = true;
-               hba->clk_scaling.is_suspended = false;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (hba->clk_scaling.is_suspended) {
+                       resume = true;
+                       hba->clk_scaling.is_suspended = false;
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (resume)
                devfreq_resume_device(hba->devfreq);
@@ -1796,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
        INIT_WORK(&hba->clk_scaling.resume_work,
                  ufshcd_clk_scaling_resume_work);
 
+       spin_lock_init(&hba->clk_scaling.lock);
+
        hba->clk_scaling.workq = alloc_ordered_workqueue(
                "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
 
@@ -2157,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
        bool queue_resume_work = false;
        ktime_t curr_t = ktime_get();
-       unsigned long flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        if (!hba->clk_scaling.active_reqs++)
                queue_resume_work = true;
 
-       if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
                return;
-       }
 
        if (queue_resume_work)
                queue_work(hba->clk_scaling.workq,
@@ -2185,18 +2177,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
                hba->clk_scaling.busy_start_t = curr_t;
                hba->clk_scaling.is_busy_started = true;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-       unsigned long flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        hba->clk_scaling.active_reqs--;
        if (!scaling->active_reqs && scaling->is_busy_started) {
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2204,7 +2195,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
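
A note on the early returns and the goto out statements that now sit inside
scoped_guard() sections in ufshcd_devfreq_target() above: the guards are
built on the compiler's cleanup attribute, which runs when the guarded
variable's scope is left by any means, so the lock is still dropped on those
paths. A standalone userspace sketch of that behavior follows; it is plain C
with illustrative names, not kernel code.

/*
 * Assumption: illustrative userspace demo of __attribute__((cleanup)),
 * the mechanism underlying guard()/scoped_guard().
 */
#include <stdio.h>

static void release(int *token)
{
	printf("cleanup ran for token %d\n", *token);	/* the "unlock" */
}

int main(void)
{
	{
		int token __attribute__((cleanup(release))) = 1;

		printf("guarded scope entered\n");
		goto out;	/* leaving the scope runs release() first */
	}
out:
	printf("after goto: already released\n");
	return 0;
}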
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index b6311069d901aeb9ff5d7048f82557c549636490..ce7667b020e25fa3f8f30b3a603378315919755f 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -436,6 +436,10 @@ struct ufs_clk_gating {
 
 /**
  * struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
  * @active_reqs: number of requests that are pending. If this is zero when
  * devfreq ->target() function is called then schedule "suspend_work" to
  * suspend devfreq.
@@ -445,9 +449,6 @@ struct ufs_clk_gating {
  * @enable_attr: sysfs attribute to enable/disable clock scaling
  * @saved_pwr_info: UFS power mode may also be changed during scaling and this
  * one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
  * @target_freq: frequency requested by devfreq framework
  * @min_gear: lowest HS gear to scale down to
  * @is_enabled: tracks if scaling is currently enabled or not, controlled by
@@ -459,15 +460,18 @@ struct ufs_clk_gating {
  * @is_suspended: tracks if devfreq is suspended or not
  */
 struct ufs_clk_scaling {
+       struct workqueue_struct *workq;
+       struct work_struct suspend_work;
+       struct work_struct resume_work;
+
+       spinlock_t lock;
+
        int active_reqs;
        unsigned long tot_busy_t;
        ktime_t window_start_t;
        ktime_t busy_start_t;
        struct device_attribute enable_attr;
        struct ufs_pa_layer_attr saved_pwr_info;
-       struct workqueue_struct *workq;
-       struct work_struct suspend_work;
-       struct work_struct resume_work;
        unsigned long target_freq;
        u32 min_gear;
        bool is_enabled;