]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/4.4.166/scsi-ufs-fix-race-between-clock-gating-and-devfreq-scaling-work.patch
drop queue-4.14/mips-make-sure-dt-memory-regions-are-valid.patch
[thirdparty/kernel/stable-queue.git] / releases / 4.4.166 / scsi-ufs-fix-race-between-clock-gating-and-devfreq-scaling-work.patch
1 From 30fc33f1ef475480dc5bea4fe1bda84b003b992c Mon Sep 17 00:00:00 2001
2 From: Subhash Jadavani <subhashj@codeaurora.org>
3 Date: Thu, 27 Oct 2016 17:25:47 -0700
4 Subject: scsi: ufs: fix race between clock gating and devfreq scaling work
5
6 From: Subhash Jadavani <subhashj@codeaurora.org>
7
8 commit 30fc33f1ef475480dc5bea4fe1bda84b003b992c upstream.
9
10 UFS devfreq clock scaling work may require clocks to be ON if it needs to
11 execute some UFS commands, hence it may request a clock hold before
12 issuing the command. But if UFS clock gating work is already running in
13 parallel, ungate work would end up waiting for the clock gating work to
14 finish and as clock gating work would also wait for the clock scaling
15 work to finish, we would enter in deadlock state. Here is the call trace
16 during this deadlock state:
17
18 Workqueue: devfreq_wq devfreq_monitor
19 __switch_to
20 __schedule
21 schedule
22 schedule_timeout
23 wait_for_common
24 wait_for_completion
25 flush_work
26 ufshcd_hold
27 ufshcd_send_uic_cmd
28 ufshcd_dme_get_attr
29 ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div
30 ufs_qcom_clk_scale_notify
31 ufshcd_scale_clks
32 ufshcd_devfreq_target
33 update_devfreq
34 devfreq_monitor
35 process_one_work
36 worker_thread
37 kthread
38 ret_from_fork
39
40 Workqueue: events ufshcd_gate_work
41 __switch_to
42 __schedule
43 schedule
44 schedule_preempt_disabled
45 __mutex_lock_slowpath
46 mutex_lock
47 devfreq_monitor_suspend
48 devfreq_simple_ondemand_handler
49 devfreq_suspend_device
50 ufshcd_gate_work
51 process_one_work
52 worker_thread
53 kthread
54 ret_from_fork
55
56 Workqueue: events ufshcd_ungate_work
57 __switch_to
58 __schedule
59 schedule
60 schedule_timeout
61 wait_for_common
62 wait_for_completion
63 flush_work
64 __cancel_work_timer
65 cancel_delayed_work_sync
66 ufshcd_ungate_work
67 process_one_work
68 worker_thread
69 kthread
70 ret_from_fork
71
72 This change fixes this deadlock by doing this in devfreq work (devfreq_wq):
73 Try cancelling clock gating work. If we are able to cancel gating work
74 or it wasn't scheduled, hold the clock reference count while scaling is
75 in progress. If gate work is already running in parallel, let's skip
76 the frequency scaling at this time and it will be retried once the next scaling
77 window expires.
78
79 Reviewed-by: Sahitya Tummala <stummala@codeaurora.org>
80 Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
81 Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
82 Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
83 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
84
85 ---
86 drivers/scsi/ufs/ufshcd.c | 32 ++++++++++++++++++++++++++++++++
87 1 file changed, 32 insertions(+)
88
89 --- a/drivers/scsi/ufs/ufshcd.c
90 +++ b/drivers/scsi/ufs/ufshcd.c
91 @@ -5511,15 +5511,47 @@ static int ufshcd_devfreq_target(struct
92 {
93 int err = 0;
94 struct ufs_hba *hba = dev_get_drvdata(dev);
95 + bool release_clk_hold = false;
96 + unsigned long irq_flags;
97
98 if (!ufshcd_is_clkscaling_enabled(hba))
99 return -EINVAL;
100
101 + spin_lock_irqsave(hba->host->host_lock, irq_flags);
102 + if (ufshcd_eh_in_progress(hba)) {
103 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
104 + return 0;
105 + }
106 +
107 + if (ufshcd_is_clkgating_allowed(hba) &&
108 + (hba->clk_gating.state != CLKS_ON)) {
109 + if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
110 + /* hold the vote until the scaling work is completed */
111 + hba->clk_gating.active_reqs++;
112 + release_clk_hold = true;
113 + hba->clk_gating.state = CLKS_ON;
114 + } else {
115 + /*
116 + * Clock gating work seems to be running in parallel
117 + * hence skip scaling work to avoid deadlock between
118 + * current scaling work and gating work.
119 + */
120 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
121 + return 0;
122 + }
123 + }
124 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
125 +
126 if (*freq == UINT_MAX)
127 err = ufshcd_scale_clks(hba, true);
128 else if (*freq == 0)
129 err = ufshcd_scale_clks(hba, false);
130
131 + spin_lock_irqsave(hba->host->host_lock, irq_flags);
132 + if (release_clk_hold)
133 + __ufshcd_release(hba);
134 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
135 +
136 return err;
137 }
138