From b32a88ad3782fbaa3cb851ebc1b694ba09840c86 Mon Sep 17 00:00:00 2001
From: Chris Leech <christopher.leech@intel.com>
Date: Thu, 19 Feb 2009 14:37:47 -0800
Subject: fcoe: fix handling of pending queue, prevent out of order frames
References: bnc#477953

The per-lport pending queue handling can result in a frame being retried
from a different CPU than it was originally sent from. The scope of the
queue lock is not large enough to prevent reordering of frames within a
sequence in that case.

Before this change I would see out of order frames on large write IOs,
leading to frequent exchange timeouts and retries.

With the locking scope changed, the fcoe_insert_wait_queue(_head) helpers
no longer do anything useful, so I removed them as well.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/scsi/fcoe/libfcoe.c |   64 +++++++++----------------------------------
 1 files changed, 13 insertions(+), 51 deletions(-)

diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index ada3ebb..d3f4eb9 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -71,8 +71,6 @@ struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
 
 /* Function Prototyes */
 static int fcoe_check_wait_queue(struct fc_lport *);
-static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
-static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
 static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
 #ifdef CONFIG_HOTPLUG_CPU
 static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
@@ -497,6 +495,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 
 	/* send down to lld */
 	fr_dev(fp) = lp;
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
 	if (fc->fcoe_pending_queue.qlen)
 		rc = fcoe_check_wait_queue(lp);
 
@@ -504,10 +503,11 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 		rc = fcoe_start_io(skb);
 
 	if (rc) {
-		fcoe_insert_wait_queue(lp, skb);
+		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
 		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
 			lp->qfull = 1;
 	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
 
 	return 0;
 }
@@ -732,8 +732,11 @@ void fcoe_watchdog(ulong vp)
 
 	read_lock(&fcoe_hostlist_lock);
 	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->lp)
+		if (fc->lp) {
+			spin_lock_bh(&fc->fcoe_pending_queue.lock);
 			fcoe_check_wait_queue(fc->lp);
+			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		}
 	}
 	read_unlock(&fcoe_hostlist_lock);
 
@@ -756,6 +759,8 @@ void fcoe_watchdog(ulong vp)
 * in the wait_queue which will be emptied by the time function OR
 * by the next skb transmit.
 *
+ * Locking: fc->fcoe_pending_queue.lock must be held before calling
+ *
 * Returns: 0 for success
 **/
 static int fcoe_check_wait_queue(struct fc_lport *lp)
@@ -765,20 +770,17 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	int rc = -1;
 
 	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
 	if (fc->fcoe_pending_queue_active)
 		goto out;
 	fc->fcoe_pending_queue_active = 1;
 	if (fc->fcoe_pending_queue.qlen) {
 		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
-			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
 			rc = fcoe_start_io(skb);
-			if (rc)
-				fcoe_insert_wait_queue_head(lp, skb);
-			spin_lock_bh(&fc->fcoe_pending_queue.lock);
-			if (rc)
+			if (rc) {
+				__skb_queue_head(&fc->fcoe_pending_queue, skb);
 				break;
+			}
 		}
 		/*
 		 * if interface pending queue is below FCOE_LOW_QUEUE_DEPTH
@@ -790,47 +792,10 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	fc->fcoe_pending_queue_active = 0;
 	rc = fc->fcoe_pending_queue.qlen;
 out:
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
 	return rc;
 }
 
 /**
- * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- **/
-static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
-					struct sk_buff *skb)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	__skb_queue_head(&fc->fcoe_pending_queue, skb);
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-}
-
-/**
- * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- **/
-static void fcoe_insert_wait_queue(struct fc_lport *lp,
-				   struct sk_buff *skb)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	__skb_queue_tail(&fc->fcoe_pending_queue, skb);
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-}
-
-/**
 * fcoe_dev_setup - setup link change notification interface
 *
 **/
@@ -1187,11 +1152,8 @@ void fcoe_clean_pending_queue(struct fc_lport *lp)
 	struct sk_buff *skb;
 
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL)
 		kfree_skb(skb);
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	}
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
 }
 EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
-- 
1.5.4.5