From: Dean Nelson <dcn@sgi.com>
Subject: [PATCH] Add the code to create the activate and notify gru message queues
References: bnc#442461

For UV add the code to create the activate and notify gru message queues.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Bernhard Walle <bwalle@suse.de>

---

 drivers/misc/sgi-xp/xpc.h    |   12 +
 drivers/misc/sgi-xp/xpc_uv.c |  259 ++++++++++++++++++++++++++++++++++---------
 2 files changed, 218 insertions(+), 53 deletions(-)

--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
 				    xpc_nasid_mask_nlongs))
 
 /*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+	void *address;		/* address of GRU message queue */
+	unsigned int order;	/* size of GRU message queue as a power of 2 */
+	int irq;		/* irq raised when message is received in mq */
+	int mmr_blade;		/* blade where watchlist was allocated from */
+	unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+	int watchlist_num;	/* number of watchlist allocatd by BIOS */
+};
+
+/*
  * The activate_mq is used to send/receive GRU messages that affect XPC's
  * heartbeat, partition active state, and channel state. This is UV only.
  */
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
 #define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
+					 XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"
 
-#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				 XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				 XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
+					 XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
 
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
 static int
 xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
 	return 0;
 }
 
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+	if (mq->irq < 0) {
+		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+			mq->irq);
+	}
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_ACTIVATE;
+	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_NOTIFY;
+	else
+		return -EINVAL;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+
+	return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = 1UL << 16;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+					 &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+			"ret=%d\n", ret);
+		return ret;
+	}
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+				    &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+			ret);
+		return -EBUSY;
+	}
+#else
+	#error not a supported configuration
+#endif
+
+	mq->watchlist_num = ret;
+	return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != SALRET_OK);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 		     irq_handler_t irq_handler)
 {
+	enum xp_retval xp_ret;
 	int ret;
 	int nid;
-	int mq_order;
+	int pg_order;
 	struct page *page;
-	void *mq;
+	struct xpc_gru_mq_uv *mq;
+
+	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+	if (mq == NULL) {
+		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+			"a xpc_gru_mq_uv structure\n");
+		ret = -ENOMEM;
+		goto out_1;
+	}
+
+	pg_order = get_order(mq_size);
+	mq->order = pg_order + PAGE_SHIFT;
+	mq_size = 1UL << mq->order;
 
-	nid = cpu_to_node(cpuid);
-	mq_order = get_order(mq_size);
+	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+	nid = cpu_to_node(cpu);
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-				mq_order);
+				pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
 			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
-		return NULL;
+		ret = -ENOMEM;
+		goto out_2;
 	}
+	mq->address = page_address(page);
 
-	mq = page_address(page);
-	ret = gru_create_message_queue(mq, mq_size);
+	ret = gru_create_message_queue(mq->address, mq_size);
 	if (ret != 0) {
 		dev_err(xpc_part, "gru_create_message_queue() returned "
 			"error=%d\n", ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+		ret = -EINVAL;
+		goto out_3;
 	}
 
-	/* !!! Need to do some other things to set up IRQ */
+	/* enable generation of irq when GRU mq operation occurs to this mq */
+	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+	if (ret != 0)
+		goto out_3;
+
+	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+	if (ret != 0)
+		goto out_4;
 
-	ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
 	if (ret != 0) {
 		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
-			irq, ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+			mq->irq, ret);
+		goto out_5;
 	}
 
-	/* !!! enable generation of irq when GRU mq op occurs to this mq */
-
-	/* ??? allow other partitions to access GRU mq? */
+	/* allow other partitions to access this GRU mq */
+	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+	if (xp_ret != xpSuccess) {
+		ret = -EACCES;
+		goto out_6;
+	}
 
 	return mq;
+
+	/* something went wrong */
+out_6:
+	free_irq(mq->irq, NULL);
+out_5:
+	xpc_release_gru_mq_irq_uv(mq);
+out_4:
+	xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+	free_pages((unsigned long)mq->address, pg_order);
+out_2:
+	kfree(mq);
+out_1:
+	return ERR_PTR(ret);
 }
283 | ||
284 | static void | |
285 | -xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) | |
286 | +xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) | |
287 | { | |
288 | - /* ??? disallow other partitions to access GRU mq? */ | |
289 | + unsigned int mq_size; | |
290 | + int pg_order; | |
291 | + int ret; | |
292 | + | |
293 | + /* disallow other partitions to access GRU mq */ | |
294 | + mq_size = 1UL << mq->order; | |
295 | + ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); | |
296 | + BUG_ON(ret != xpSuccess); | |
297 | + | |
298 | + /* unregister irq handler and release mq irq/vector mapping */ | |
299 | + free_irq(mq->irq, NULL); | |
300 | + xpc_release_gru_mq_irq_uv(mq); | |
301 | ||
302 | - /* !!! disable generation of irq when GRU mq op occurs to this mq */ | |
303 | + /* disable generation of irq when GRU mq op occurs to this mq */ | |
304 | + xpc_gru_mq_watchlist_free_uv(mq); | |
305 | ||
306 | - free_irq(irq, NULL); | |
307 | + pg_order = mq->order - PAGE_SHIFT; | |
308 | + free_pages((unsigned long)mq->address, pg_order); | |
309 | ||
310 | - free_pages((unsigned long)mq, get_order(mq_size)); | |
311 | + kfree(mq); | |
312 | } | |
313 | ||
 static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void
 	struct xpc_partition *part;
 	int wakeup_hb_checker = 0;
 
-	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+	while (1) {
+		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+		if (msg_hdr == NULL)
+			break;
 
 		partid = msg_hdr->partid;
 		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void
 			}
 		}
 
-		gru_free_message(xpc_activate_mq_uv, msg_hdr);
+		gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
 	}
 
 	if (wakeup_hb_checker)
@@ -507,7 +667,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *
 static int
 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
 {
-	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
 	return 0;
 }
 
@@ -1410,22 +1570,18 @@ xpc_init_uv(void)
 		return -E2BIG;
 	}
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+						  XPC_ACTIVATE_IRQ_NAME,
 						  xpc_handle_activate_IRQ_uv);
-	if (xpc_activate_mq_uv == NULL)
-		return -ENOMEM;
+	if (IS_ERR(xpc_activate_mq_uv))
+		return PTR_ERR(xpc_activate_mq_uv);
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+						XPC_NOTIFY_IRQ_NAME,
 						xpc_handle_notify_IRQ_uv);
-	if (xpc_notify_mq_uv == NULL) {
-		/* !!! The irq argument's value isn't correct. */
-		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
-				      XPC_ACTIVATE_MQ_SIZE_UV, 0);
-		return -ENOMEM;
+	if (IS_ERR(xpc_notify_mq_uv)) {
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+		return PTR_ERR(xpc_notify_mq_uv);
 	}
 
 	return 0;
@@ -1434,9 +1590,6 @@ xpc_init_uv(void)
 void
 xpc_exit_uv(void)
 {
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
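
For reference, the rewritten xpc_create_gru_mq_uv() relies on two common kernel idioms: numbered goto labels that unwind each completed setup step in reverse order on failure, and an ERR_PTR() return so callers such as xpc_init_uv() can propagate the exact errno via PTR_ERR() instead of a generic -ENOMEM. The stand-alone userspace sketch below (not part of the patch) illustrates just those two idioms; the helper names (watchlist_alloc, irq_setup, and so on) are hypothetical stubs, and the ERR_PTR/IS_ERR/PTR_ERR definitions are simplified stand-ins for <linux/err.h>, not the real kernel implementations.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR helpers from <linux/err.h>. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stubs standing in for the watchlist and irq setup steps. */
static int watchlist_alloc(void *q)	{ (void)q; return 0; }
static void watchlist_free(void *q)	{ (void)q; }
static int irq_setup(void *q)		{ (void)q; return -EBUSY; } /* force a failure */
static void irq_release(void *q)	{ (void)q; }

/* Create a queue, unwinding every completed step in reverse order on error. */
static void *create_mq(void)
{
	void *q;
	int ret;

	q = malloc(4096);		/* stands in for the page allocation */
	if (q == NULL) {
		ret = -ENOMEM;
		goto out_1;
	}

	ret = watchlist_alloc(q);
	if (ret != 0)
		goto out_2;

	ret = irq_setup(q);
	if (ret != 0)
		goto out_3;

	return q;

	/* something went wrong */
out_3:
	watchlist_free(q);
out_2:
	free(q);
out_1:
	return ERR_PTR(ret);		/* caller learns which errno, not just NULL */
}

int main(void)
{
	void *mq = create_mq();

	if (IS_ERR(mq)) {
		printf("create_mq() failed: error=%ld\n", PTR_ERR(mq));
		return 1;
	}

	/* normal teardown mirrors the setup order in reverse */
	irq_release(mq);
	watchlist_free(mq);
	free(mq);
	return 0;
}

In the patch itself, xpc_destroy_gru_mq_uv() performs the same teardown steps in the same reverse order for the normal (non-error) path.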