]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.31/patches.arch/ppc-spufs-05-Use-state_mutex-for-switch_log-lockin.patch
Move xen patchset to new version's subdir.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.arch / ppc-spufs-05-Use-state_mutex-for-switch_log-lockin.patch
1 Subject: Use state_mutex for switch_log locking, and prevent multiple openers
2 From: Jeremy Kerr <jk@ozlabs.org>
3 References: 447133 - LTC50070
4
5 Currently, we use ctx->mapping_lock and ctx->switch_log->lock for the
6 context switch log. The mapping lock only prevents concurrent open()s,
7 so we require the switch_log->lock for reads.
8
9 Since writes to the switch log buffer occur on context switches, we're
10 better off synchronising with the state_mutex, which is held during a
11 switch. Since we're serialised throughout the buffer reads and writes,
12 we can use the state mutex to protect open and release too, and
13 can now kfree() the log buffer on release. This allows us to perform
14 the switch log notify without taking any extra locks.
15
16 Because the buffer is only present while the file is open, we can use
17 it to prevent multiple simultaneous openers.
18
19 Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
20 Signed-off-by: Olaf Hering <olh@suse.de>
21 ---
22 arch/powerpc/platforms/cell/spufs/file.c | 133 ++++++++++++++++++------------
23 arch/powerpc/platforms/cell/spufs/run.c | 3
24 arch/powerpc/platforms/cell/spufs/spufs.h | 1
25 3 files changed, 81 insertions(+), 56 deletions(-)
26
27 --- a/arch/powerpc/platforms/cell/spufs/file.c
28 +++ b/arch/powerpc/platforms/cell/spufs/file.c
29 @@ -2429,38 +2429,48 @@ static inline int spufs_switch_log_avail
30 static int spufs_switch_log_open(struct inode *inode, struct file *file)
31 {
32 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
33 + int rc;
34 +
35 + rc = spu_acquire(ctx);
36 + if (rc)
37 + return rc;
38
39 - /*
40 - * We (ab-)use the mapping_lock here because it serves the similar
41 - * purpose for synchronizing open/close elsewhere. Maybe it should
42 - * be renamed eventually.
43 - */
44 - mutex_lock(&ctx->mapping_lock);
45 if (ctx->switch_log) {
46 - spin_lock(&ctx->switch_log->lock);
47 - ctx->switch_log->head = 0;
48 - ctx->switch_log->tail = 0;
49 - spin_unlock(&ctx->switch_log->lock);
50 - } else {
51 - /*
52 - * We allocate the switch log data structures on first open.
53 - * They will never be free because we assume a context will
54 - * be traced until it goes away.
55 - */
56 - ctx->switch_log = kzalloc(sizeof(struct switch_log) +
57 - SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
58 - GFP_KERNEL);
59 - if (!ctx->switch_log)
60 - goto out;
61 - spin_lock_init(&ctx->switch_log->lock);
62 - init_waitqueue_head(&ctx->switch_log->wait);
63 + rc = -EBUSY;
64 + goto out;
65 + }
66 +
67 + ctx->switch_log = kzalloc(sizeof(struct switch_log) +
68 + SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
69 + GFP_KERNEL);
70 +
71 + if (!ctx->switch_log) {
72 + rc = -ENOMEM;
73 + goto out;
74 }
75 - mutex_unlock(&ctx->mapping_lock);
76 +
77 + init_waitqueue_head(&ctx->switch_log->wait);
78 + rc = 0;
79 +
80 +out:
81 + spu_release(ctx);
82 + return rc;
83 +}
84 +
85 +static int spufs_switch_log_release(struct inode *inode, struct file *file)
86 +{
87 + struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
88 + int rc;
89 +
90 + rc = spu_acquire(ctx);
91 + if (rc)
92 + return rc;
93 +
94 + kfree(ctx->switch_log);
95 + ctx->switch_log = NULL;
96 + spu_release(ctx);
97
98 return 0;
99 - out:
100 - mutex_unlock(&ctx->mapping_lock);
101 - return -ENOMEM;
102 }
103
104 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
105 @@ -2488,42 +2498,46 @@ static ssize_t spufs_switch_log_read(str
106 if (!buf || len < 0)
107 return -EINVAL;
108
109 + error = spu_acquire(ctx);
110 + if (error)
111 + return error;
112 +
113 while (cnt < len) {
114 char tbuf[128];
115 int width;
116
117 if (file->f_flags & O_NONBLOCK) {
118 - if (spufs_switch_log_used(ctx) <= 0)
119 - return cnt ? cnt : -EAGAIN;
120 + if (spufs_switch_log_used(ctx) == 0) {
121 + error = -EAGAIN;
122 + break;
123 + }
124 } else {
125 - /* Wait for data in buffer */
126 - error = wait_event_interruptible(ctx->switch_log->wait,
127 + /* spufs_wait will drop the mutex and re-acquire,
128 + * but since we're in read(), the file cannot be
129 + * _released (and so ctx->switch_log is stable).
130 + */
131 + error = spufs_wait(ctx->switch_log->wait,
132 spufs_switch_log_used(ctx) > 0);
133 +
134 + /* On error, spufs_wait returns without the
135 + * state mutex held */
136 if (error)
137 - break;
138 + return error;
139 }
140
141 - spin_lock(&ctx->switch_log->lock);
142 - if (ctx->switch_log->head == ctx->switch_log->tail) {
143 - /* multiple readers race? */
144 - spin_unlock(&ctx->switch_log->lock);
145 + /* We may have had entries read from underneath us while we
146 + * dropped the mutex in spufs_wait, so re-check */
147 + if (ctx->switch_log->head == ctx->switch_log->tail)
148 continue;
149 - }
150
151 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
152 - if (width < len) {
153 + if (width < len)
154 ctx->switch_log->tail =
155 (ctx->switch_log->tail + 1) %
156 SWITCH_LOG_BUFSIZE;
157 - }
158 -
159 - spin_unlock(&ctx->switch_log->lock);
160 -
161 - /*
162 - * If the record is greater than space available return
163 - * partial buffer (so far)
164 - */
165 - if (width >= len)
166 + else
167 + /* If the record is greater than space available return
168 + * partial buffer (so far) */
169 break;
170
171 error = copy_to_user(buf + cnt, tbuf, width);
172 @@ -2532,6 +2546,8 @@ static ssize_t spufs_switch_log_read(str
173 cnt += width;
174 }
175
176 + spu_release(ctx);
177 +
178 return cnt == 0 ? error : cnt;
179 }
180
181 @@ -2540,29 +2556,41 @@ static unsigned int spufs_switch_log_pol
182 struct inode *inode = file->f_path.dentry->d_inode;
183 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
184 unsigned int mask = 0;
185 + int rc;
186
187 poll_wait(file, &ctx->switch_log->wait, wait);
188
189 + rc = spu_acquire(ctx);
190 + if (rc)
191 + return rc;
192 +
193 if (spufs_switch_log_used(ctx) > 0)
194 mask |= POLLIN;
195
196 + spu_release(ctx);
197 +
198 return mask;
199 }
200
201 static const struct file_operations spufs_switch_log_fops = {
202 - .owner = THIS_MODULE,
203 - .open = spufs_switch_log_open,
204 - .read = spufs_switch_log_read,
205 - .poll = spufs_switch_log_poll,
206 + .owner = THIS_MODULE,
207 + .open = spufs_switch_log_open,
208 + .read = spufs_switch_log_read,
209 + .poll = spufs_switch_log_poll,
210 + .release = spufs_switch_log_release,
211 };
212
213 +/**
214 + * Log a context switch event to a switch log reader.
215 + *
216 + * Must be called with ctx->state_mutex held.
217 + */
218 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
219 u32 type, u32 val)
220 {
221 if (!ctx->switch_log)
222 return;
223
224 - spin_lock(&ctx->switch_log->lock);
225 if (spufs_switch_log_avail(ctx) > 1) {
226 struct switch_log_entry *p;
227
228 @@ -2576,7 +2604,6 @@ void spu_switch_log_notify(struct spu *s
229 ctx->switch_log->head =
230 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
231 }
232 - spin_unlock(&ctx->switch_log->lock);
233
234 wake_up(&ctx->switch_log->wait);
235 }
236 --- a/arch/powerpc/platforms/cell/spufs/run.c
237 +++ b/arch/powerpc/platforms/cell/spufs/run.c
238 @@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_conte
239
240 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
241 clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
242 + spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
243 spu_release(ctx);
244
245 if (signal_pending(current))
246 @@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *c
247 ret = spu_run_fini(ctx, npc, &status);
248 spu_yield(ctx);
249
250 - spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
251 -
252 if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
253 (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
254 ctx->stats.libassist++;
255 --- a/arch/powerpc/platforms/cell/spufs/spufs.h
256 +++ b/arch/powerpc/platforms/cell/spufs/spufs.h
257 @@ -65,7 +65,6 @@ enum {
258 };
259
260 struct switch_log {
261 - spinlock_t lock;
262 wait_queue_head_t wait;
263 unsigned long head;
264 unsigned long tail;