]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.25/patches.suse/xfs-dmapi-src
Updated xen patches taken from suse.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.25 / patches.suse / xfs-dmapi-src
1 Date: Thu, 09 Oct 2008 17:11:31 +1100
2 From: Donald Douwsma <donaldd@sgi.com>
3 Subject: DMAPI Source
4 Patch-mainline: ?
5 References: bnc#450658
6
7 Acked-by: Jan Kara <jack@suse.cz>
8
9 ---
10 fs/dmapi/Makefile | 53 +
11 fs/dmapi/Status | 128 +++
12 fs/dmapi/dmapi.h | 1086 ++++++++++++++++++++++++++
13 fs/dmapi/dmapi_attr.c | 93 ++
14 fs/dmapi/dmapi_bulkattr.c | 170 ++++
15 fs/dmapi/dmapi_config.c | 117 ++
16 fs/dmapi/dmapi_dmattr.c | 228 +++++
17 fs/dmapi/dmapi_event.c | 860 +++++++++++++++++++++
18 fs/dmapi/dmapi_handle.c | 119 ++
19 fs/dmapi/dmapi_hole.c | 119 ++
20 fs/dmapi/dmapi_io.c | 142 +++
21 fs/dmapi/dmapi_kern.h | 599 ++++++++++++++
22 fs/dmapi/dmapi_mountinfo.c | 527 ++++++++++++
23 fs/dmapi/dmapi_port.h | 138 +++
24 fs/dmapi/dmapi_private.h | 619 +++++++++++++++
25 fs/dmapi/dmapi_region.c | 91 ++
26 fs/dmapi/dmapi_register.c | 1644 ++++++++++++++++++++++++++++++++++++++++
27 fs/dmapi/dmapi_right.c | 1256 ++++++++++++++++++++++++++++++
28 fs/dmapi/dmapi_session.c | 1825 +++++++++++++++++++++++++++++++++++++++++++++
29 fs/dmapi/dmapi_sysent.c | 805 +++++++++++++++++++
30 fs/dmapi/sv.h | 89 ++
31 21 files changed, 10708 insertions(+)
32
33 Index: linux-2.6.26/fs/dmapi/dmapi_attr.c
34 ===================================================================
35 --- /dev/null
36 +++ linux-2.6.26/fs/dmapi/dmapi_attr.c
37 @@ -0,0 +1,93 @@
38 +/*
39 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
40 + *
41 + * This program is free software; you can redistribute it and/or modify it
42 + * under the terms of version 2 of the GNU General Public License as
43 + * published by the Free Software Foundation.
44 + *
45 + * This program is distributed in the hope that it would be useful, but
46 + * WITHOUT ANY WARRANTY; without even the implied warranty of
47 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
48 + *
49 + * Further, this software is distributed without any warranty that it is
50 + * free of the rightful claim of any third person regarding infringement
51 + * or the like. Any license provided herein, whether implied or
52 + * otherwise, applies only to this software file. Patent licenses, if
53 + * any, provided herein do not apply to combinations of this program with
54 + * other software, or any other product whatsoever.
55 + *
56 + * You should have received a copy of the GNU General Public License along
57 + * with this program; if not, write the Free Software Foundation, Inc., 59
58 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
59 + *
60 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
61 + * Mountain View, CA 94043, or:
62 + *
63 + * http://www.sgi.com
64 + *
65 + * For further information regarding this notice, see:
66 + *
67 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
68 + */
69 +
70 +#include "dmapi.h"
71 +#include "dmapi_kern.h"
72 +#include "dmapi_private.h"
73 +
74 +
75 +/* Retrieve attributes for a single file, directory or symlink. */
76 +
77 +int
78 +dm_get_fileattr(
79 + dm_sessid_t sid,
80 + void __user *hanp,
81 + size_t hlen,
82 + dm_token_t token,
83 + u_int mask,
84 + dm_stat_t __user *statp)
85 +{
86 + dm_fsys_vector_t *fsys_vector;
87 + dm_tokdata_t *tdp;
88 + int error;
89 +
90 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
91 + DM_RIGHT_SHARED, &tdp);
92 + if (error != 0)
93 + return(error);
94 +
95 + fsys_vector = dm_fsys_vector(tdp->td_ip);
96 + error = fsys_vector->get_fileattr(tdp->td_ip, tdp->td_right,
97 + mask, statp);
98 +
99 + dm_app_put_tdp(tdp);
100 + return(error);
101 +}
102 +
103 +
104 +/* Set one or more file attributes of a file, directory, or symlink. */
105 +
106 +int
107 +dm_set_fileattr(
108 + dm_sessid_t sid,
109 + void __user *hanp,
110 + size_t hlen,
111 + dm_token_t token,
112 + u_int mask,
113 + dm_fileattr_t __user *attrp)
114 +{
115 + dm_fsys_vector_t *fsys_vector;
116 + dm_tokdata_t *tdp;
117 + int error;
118 +
119 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
120 + DM_RIGHT_EXCL, &tdp);
121 + if (error != 0)
122 + return(error);
123 +
124 + fsys_vector = dm_fsys_vector(tdp->td_ip);
125 + error = fsys_vector->set_fileattr(tdp->td_ip, tdp->td_right,
126 + mask, attrp);
127 +
128 + dm_app_put_tdp(tdp);
129 + return(error);
130 +}
131 Index: linux-2.6.26/fs/dmapi/dmapi_bulkattr.c
132 ===================================================================
133 --- /dev/null
134 +++ linux-2.6.26/fs/dmapi/dmapi_bulkattr.c
135 @@ -0,0 +1,170 @@
136 +/*
137 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
138 + *
139 + * This program is free software; you can redistribute it and/or modify it
140 + * under the terms of version 2 of the GNU General Public License as
141 + * published by the Free Software Foundation.
142 + *
143 + * This program is distributed in the hope that it would be useful, but
144 + * WITHOUT ANY WARRANTY; without even the implied warranty of
145 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
146 + *
147 + * Further, this software is distributed without any warranty that it is
148 + * free of the rightful claim of any third person regarding infringement
149 + * or the like. Any license provided herein, whether implied or
150 + * otherwise, applies only to this software file. Patent licenses, if
151 + * any, provided herein do not apply to combinations of this program with
152 + * other software, or any other product whatsoever.
153 + *
154 + * You should have received a copy of the GNU General Public License along
155 + * with this program; if not, write the Free Software Foundation, Inc., 59
156 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
157 + *
158 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
159 + * Mountain View, CA 94043, or:
160 + *
161 + * http://www.sgi.com
162 + *
163 + * For further information regarding this notice, see:
164 + *
165 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
166 + */
167 +
168 +#include "dmapi.h"
169 +#include "dmapi_kern.h"
170 +#include "dmapi_private.h"
171 +
172 +
173 +int
174 +dm_init_attrloc(
175 + dm_sessid_t sid,
176 + void __user *hanp,
177 + size_t hlen,
178 + dm_token_t token,
179 + dm_attrloc_t __user *locp)
180 +{
181 + dm_fsys_vector_t *fsys_vector;
182 + dm_tokdata_t *tdp;
183 + int error;
184 +
185 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS|DM_TDT_DIR,
186 + DM_RIGHT_SHARED, &tdp);
187 + if (error != 0)
188 + return(error);
189 +
190 + fsys_vector = dm_fsys_vector(tdp->td_ip);
191 + error = fsys_vector->init_attrloc(tdp->td_ip, tdp->td_right, locp);
192 +
193 + dm_app_put_tdp(tdp);
194 + return(error);
195 +}
196 +
197 +
198 +/*
199 + * Retrieves both standard and DM specific file attributes for the file
200 + * system indicated by the handle. (The FS has to be mounted).
201 + * Syscall returns 1 to indicate SUCCESS and more information is available.
202 + * -1 is returned on error, and errno will be set appropriately.
203 + * 0 is returned upon successful completion.
204 + */
205 +
206 +int
207 +dm_get_bulkattr_rvp(
208 + dm_sessid_t sid,
209 + void __user *hanp,
210 + size_t hlen,
211 + dm_token_t token,
212 + u_int mask,
213 + dm_attrloc_t __user *locp,
214 + size_t buflen,
215 + void __user *bufp,
216 + size_t __user *rlenp,
217 + int *rvp)
218 +{
219 + dm_fsys_vector_t *fsys_vector;
220 + dm_tokdata_t *tdp;
221 + int error;
222 +
223 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
224 + DM_RIGHT_SHARED, &tdp);
225 + if (error != 0)
226 + return(error);
227 +
228 + fsys_vector = dm_fsys_vector(tdp->td_ip);
229 + error = fsys_vector->get_bulkattr_rvp(tdp->td_ip, tdp->td_right,
230 + mask, locp, buflen, bufp, rlenp, rvp);
231 +
232 + dm_app_put_tdp(tdp);
233 + return(error);
234 +}
235 +
236 +
237 +/*
238 + * Retrieves attributes of directory entries given a handle to that
239 + * directory. Iterative.
240 + * Syscall returns 1 to indicate SUCCESS and more information is available.
241 + * -1 is returned on error, and errno will be set appropriately.
242 + * 0 is returned upon successful completion.
243 + */
244 +
245 +int
246 +dm_get_dirattrs_rvp(
247 + dm_sessid_t sid,
248 + void __user *hanp,
249 + size_t hlen,
250 + dm_token_t token,
251 + u_int mask,
252 + dm_attrloc_t __user *locp,
253 + size_t buflen,
254 + void __user *bufp,
255 + size_t __user *rlenp,
256 + int *rvp)
257 +{
258 + dm_fsys_vector_t *fsys_vector;
259 + dm_tokdata_t *tdp;
260 + int error;
261 +
262 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_DIR,
263 + DM_RIGHT_SHARED, &tdp);
264 + if (error != 0)
265 + return(error);
266 +
267 + fsys_vector = dm_fsys_vector(tdp->td_ip);
268 + error = fsys_vector->get_dirattrs_rvp(tdp->td_ip, tdp->td_right,
269 + mask, locp, buflen, bufp, rlenp, rvp);
270 +
271 + dm_app_put_tdp(tdp);
272 + return(error);
273 +}
274 +
275 +
276 +int
277 +dm_get_bulkall_rvp(
278 + dm_sessid_t sid,
279 + void __user *hanp,
280 + size_t hlen,
281 + dm_token_t token,
282 + u_int mask,
283 + dm_attrname_t __user *attrnamep,
284 + dm_attrloc_t __user *locp,
285 + size_t buflen,
286 + void __user *bufp,
287 + size_t __user *rlenp,
288 + int *rvp)
289 +{
290 + dm_fsys_vector_t *fsys_vector;
291 + dm_tokdata_t *tdp;
292 + int error;
293 +
294 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
295 + DM_RIGHT_SHARED, &tdp);
296 + if (error != 0)
297 + return(error);
298 +
299 + fsys_vector = dm_fsys_vector(tdp->td_ip);
300 + error = fsys_vector->get_bulkall_rvp(tdp->td_ip, tdp->td_right,
301 + mask, attrnamep, locp, buflen, bufp, rlenp, rvp);
302 +
303 + dm_app_put_tdp(tdp);
304 + return(error);
305 +}
306 Index: linux-2.6.26/fs/dmapi/dmapi_config.c
307 ===================================================================
308 --- /dev/null
309 +++ linux-2.6.26/fs/dmapi/dmapi_config.c
310 @@ -0,0 +1,117 @@
311 +/*
312 + * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
313 + *
314 + * This program is free software; you can redistribute it and/or modify it
315 + * under the terms of version 2 of the GNU General Public License as
316 + * published by the Free Software Foundation.
317 + *
318 + * This program is distributed in the hope that it would be useful, but
319 + * WITHOUT ANY WARRANTY; without even the implied warranty of
320 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
321 + *
322 + * Further, this software is distributed without any warranty that it is
323 + * free of the rightful claim of any third person regarding infringement
324 + * or the like. Any license provided herein, whether implied or
325 + * otherwise, applies only to this software file. Patent licenses, if
326 + * any, provided herein do not apply to combinations of this program with
327 + * other software, or any other product whatsoever.
328 + *
329 + * You should have received a copy of the GNU General Public License along
330 + * with this program; if not, write the Free Software Foundation, Inc., 59
331 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
332 + *
333 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
334 + * Mountain View, CA 94043, or:
335 + *
336 + * http://www.sgi.com
337 + *
338 + * For further information regarding this notice, see:
339 + *
340 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
341 + */
342 +
343 +#include <asm/uaccess.h>
344 +#include "dmapi.h"
345 +#include "dmapi_kern.h"
346 +#include "dmapi_private.h"
347 +
348 +int
349 +dm_get_config(
350 + void __user *hanp,
351 + size_t hlen,
352 + dm_config_t flagname,
353 + dm_size_t __user *retvalp)
354 +{
355 + dm_fsys_vector_t *fsys_vector;
356 + dm_tokdata_t *tdp;
357 + dm_size_t retval;
358 + int system = 1;
359 + int error;
360 +
361 + /* Trap and process configuration parameters which are system-wide. */
362 +
363 + switch (flagname) {
364 + case DM_CONFIG_LEGACY:
365 + case DM_CONFIG_PENDING:
366 + case DM_CONFIG_OBJ_REF:
367 + retval = DM_TRUE;
368 + break;
369 + case DM_CONFIG_MAX_MESSAGE_DATA:
370 + retval = DM_MAX_MSG_DATA;
371 + break;
372 + default:
373 + system = 0;
374 + break;
375 + }
376 + if (system) {
377 + if (copy_to_user(retvalp, &retval, sizeof(retval)))
378 + return(-EFAULT);
379 + return(0);
380 + }
381 +
382 + /* Must be filesystem-specific. Convert the handle into an inode. */
383 +
384 + if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
385 + return(error);
386 +
387 + /* Now call the filesystem-specific routine to determine the
388 + value of the configuration option for that filesystem.
389 + */
390 +
391 + fsys_vector = dm_fsys_vector(tdp->td_ip);
392 + error = fsys_vector->get_config(tdp->td_ip, tdp->td_right,
393 + flagname, retvalp);
394 +
395 + dm_app_put_tdp(tdp);
396 + return(error);
397 +}
398 +
399 +
400 +int
401 +dm_get_config_events(
402 + void __user *hanp,
403 + size_t hlen,
404 + u_int nelem,
405 + dm_eventset_t __user *eventsetp,
406 + u_int __user *nelemp)
407 +{
408 + dm_fsys_vector_t *fsys_vector;
409 + dm_tokdata_t *tdp;
410 + int error;
411 +
412 + /* Convert the handle into an inode. */
413 +
414 + if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
415 + return(error);
416 +
417 + /* Now call the filesystem-specific routine to determine the
418 + events supported by that filesystem.
419 + */
420 +
421 + fsys_vector = dm_fsys_vector(tdp->td_ip);
422 + error = fsys_vector->get_config_events(tdp->td_ip, tdp->td_right,
423 + nelem, eventsetp, nelemp);
424 +
425 + dm_app_put_tdp(tdp);
426 + return(error);
427 +}
428 Index: linux-2.6.26/fs/dmapi/dmapi_dmattr.c
429 ===================================================================
430 --- /dev/null
431 +++ linux-2.6.26/fs/dmapi/dmapi_dmattr.c
432 @@ -0,0 +1,228 @@
433 +/*
434 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
435 + *
436 + * This program is free software; you can redistribute it and/or modify it
437 + * under the terms of version 2 of the GNU General Public License as
438 + * published by the Free Software Foundation.
439 + *
440 + * This program is distributed in the hope that it would be useful, but
441 + * WITHOUT ANY WARRANTY; without even the implied warranty of
442 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
443 + *
444 + * Further, this software is distributed without any warranty that it is
445 + * free of the rightful claim of any third person regarding infringement
446 + * or the like. Any license provided herein, whether implied or
447 + * otherwise, applies only to this software file. Patent licenses, if
448 + * any, provided herein do not apply to combinations of this program with
449 + * other software, or any other product whatsoever.
450 + *
451 + * You should have received a copy of the GNU General Public License along
452 + * with this program; if not, write the Free Software Foundation, Inc., 59
453 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
454 + *
455 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
456 + * Mountain View, CA 94043, or:
457 + *
458 + * http://www.sgi.com
459 + *
460 + * For further information regarding this notice, see:
461 + *
462 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
463 + */
464 +#include "dmapi.h"
465 +#include "dmapi_kern.h"
466 +#include "dmapi_private.h"
467 +
468 +
469 +int
470 +dm_clear_inherit(
471 + dm_sessid_t sid,
472 + void __user *hanp,
473 + size_t hlen,
474 + dm_token_t token,
475 + dm_attrname_t __user *attrnamep)
476 +{
477 + dm_fsys_vector_t *fsys_vector;
478 + dm_tokdata_t *tdp;
479 + int error;
480 +
481 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
482 + DM_RIGHT_EXCL, &tdp);
483 + if (error != 0)
484 + return(error);
485 +
486 + fsys_vector = dm_fsys_vector(tdp->td_ip);
487 + error = fsys_vector->clear_inherit(tdp->td_ip, tdp->td_right,
488 + attrnamep);
489 +
490 + dm_app_put_tdp(tdp);
491 + return(error);
492 +}
493 +
494 +
495 +int
496 +dm_get_dmattr(
497 + dm_sessid_t sid,
498 + void __user *hanp,
499 + size_t hlen,
500 + dm_token_t token,
501 + dm_attrname_t __user *attrnamep,
502 + size_t buflen,
503 + void __user *bufp,
504 + size_t __user *rlenp)
505 +{
506 + dm_fsys_vector_t *fsys_vector;
507 + dm_tokdata_t *tdp;
508 + int error;
509 +
510 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
511 + DM_RIGHT_SHARED, &tdp);
512 + if (error != 0)
513 + return(error);
514 +
515 + fsys_vector = dm_fsys_vector(tdp->td_ip);
516 + error = fsys_vector->get_dmattr(tdp->td_ip, tdp->td_right,
517 + attrnamep, buflen, bufp, rlenp);
518 +
519 + dm_app_put_tdp(tdp);
520 + return(error);
521 +}
522 +
523 +
524 +int
525 +dm_getall_dmattr(
526 + dm_sessid_t sid,
527 + void __user *hanp,
528 + size_t hlen,
529 + dm_token_t token,
530 + size_t buflen,
531 + void __user *bufp,
532 + size_t __user *rlenp)
533 +{
534 + dm_fsys_vector_t *fsys_vector;
535 + dm_tokdata_t *tdp;
536 + int error;
537 +
538 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
539 + DM_RIGHT_SHARED, &tdp);
540 + if (error != 0)
541 + return(error);
542 +
543 + fsys_vector = dm_fsys_vector(tdp->td_ip);
544 + error = fsys_vector->getall_dmattr(tdp->td_ip, tdp->td_right,
545 + buflen, bufp, rlenp);
546 +
547 + dm_app_put_tdp(tdp);
548 + return(error);
549 +}
550 +
551 +
552 +int
553 +dm_getall_inherit(
554 + dm_sessid_t sid,
555 + void __user *hanp,
556 + size_t hlen,
557 + dm_token_t token,
558 + u_int nelem,
559 + dm_inherit_t __user *inheritbufp,
560 + u_int __user *nelemp)
561 +{
562 + dm_fsys_vector_t *fsys_vector;
563 + dm_tokdata_t *tdp;
564 + int error;
565 +
566 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
567 + DM_RIGHT_SHARED, &tdp);
568 + if (error != 0)
569 + return(error);
570 +
571 + fsys_vector = dm_fsys_vector(tdp->td_ip);
572 + error = fsys_vector->getall_inherit(tdp->td_ip, tdp->td_right,
573 + nelem, inheritbufp, nelemp);
574 +
575 + dm_app_put_tdp(tdp);
576 + return(error);
577 +}
578 +
579 +
580 +int
581 +dm_remove_dmattr(
582 + dm_sessid_t sid,
583 + void __user *hanp,
584 + size_t hlen,
585 + dm_token_t token,
586 + int setdtime,
587 + dm_attrname_t __user *attrnamep)
588 +{
589 + dm_fsys_vector_t *fsys_vector;
590 + dm_tokdata_t *tdp;
591 + int error;
592 +
593 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
594 + DM_RIGHT_EXCL, &tdp);
595 + if (error != 0)
596 + return(error);
597 +
598 + fsys_vector = dm_fsys_vector(tdp->td_ip);
599 + error = fsys_vector->remove_dmattr(tdp->td_ip, tdp->td_right,
600 + setdtime, attrnamep);
601 +
602 + dm_app_put_tdp(tdp);
603 + return(error);
604 +}
605 +
606 +
607 +int
608 +dm_set_dmattr(
609 + dm_sessid_t sid,
610 + void __user *hanp,
611 + size_t hlen,
612 + dm_token_t token,
613 + dm_attrname_t __user *attrnamep,
614 + int setdtime,
615 + size_t buflen,
616 + void __user *bufp)
617 +{
618 + dm_fsys_vector_t *fsys_vector;
619 + dm_tokdata_t *tdp;
620 + int error;
621 +
622 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VNO,
623 + DM_RIGHT_EXCL, &tdp);
624 + if (error != 0)
625 + return(error);
626 +
627 + fsys_vector = dm_fsys_vector(tdp->td_ip);
628 + error = fsys_vector->set_dmattr(tdp->td_ip, tdp->td_right,
629 + attrnamep, setdtime, buflen, bufp);
630 +
631 + dm_app_put_tdp(tdp);
632 + return(error);
633 +}
634 +
635 +
636 +int
637 +dm_set_inherit(
638 + dm_sessid_t sid,
639 + void __user *hanp,
640 + size_t hlen,
641 + dm_token_t token,
642 + dm_attrname_t __user *attrnamep,
643 + mode_t mode)
644 +{
645 + dm_fsys_vector_t *fsys_vector;
646 + dm_tokdata_t *tdp;
647 + int error;
648 +
649 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
650 + DM_RIGHT_EXCL, &tdp);
651 + if (error != 0)
652 + return(error);
653 +
654 + fsys_vector = dm_fsys_vector(tdp->td_ip);
655 + error = fsys_vector->set_inherit(tdp->td_ip, tdp->td_right,
656 + attrnamep, mode);
657 +
658 + dm_app_put_tdp(tdp);
659 + return(error);
660 +}
661 Index: linux-2.6.26/fs/dmapi/dmapi_event.c
662 ===================================================================
663 --- /dev/null
664 +++ linux-2.6.26/fs/dmapi/dmapi_event.c
665 @@ -0,0 +1,860 @@
666 +/*
667 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
668 + *
669 + * This program is free software; you can redistribute it and/or modify it
670 + * under the terms of version 2 of the GNU General Public License as
671 + * published by the Free Software Foundation.
672 + *
673 + * This program is distributed in the hope that it would be useful, but
674 + * WITHOUT ANY WARRANTY; without even the implied warranty of
675 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
676 + *
677 + * Further, this software is distributed without any warranty that it is
678 + * free of the rightful claim of any third person regarding infringement
679 + * or the like. Any license provided herein, whether implied or
680 + * otherwise, applies only to this software file. Patent licenses, if
681 + * any, provided herein do not apply to combinations of this program with
682 + * other software, or any other product whatsoever.
683 + *
684 + * You should have received a copy of the GNU General Public License along
685 + * with this program; if not, write the Free Software Foundation, Inc., 59
686 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
687 + *
688 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
689 + * Mountain View, CA 94043, or:
690 + *
691 + * http://www.sgi.com
692 + *
693 + * For further information regarding this notice, see:
694 + *
695 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
696 + */
697 +#include <asm/uaccess.h>
698 +#include "dmapi.h"
699 +#include "dmapi_kern.h"
700 +#include "dmapi_private.h"
701 +
702 +/* The "rights" portion of the DMAPI spec is not currently implemented. A
703 + framework for rights is provided in the code, but turns out to be a noop
704 + in practice. The following comments are a brain dump to serve as input to
705 + the poor soul that eventually has to get DMAPI rights working in IRIX.
706 +
707 + A DMAPI right is similar but not identical to the mrlock_t mechanism
708 + already used within the kernel. The similarities are that it is a
709 + sleeping lock, and that a multiple-reader, single-writer protocol is used.
710 + How locks are obtained and dropped are different however. With a mrlock_t,
711 + a thread grabs the lock, does some stuff, then drops the lock, and all other
712 + threads block in the meantime (assuming a write lock). There is a one-to-
713 + one relationship between the lock and the thread which obtained the lock.
714 + Not so with DMAPI right locks. A DMAPI lock is associated with a particular
715 + session/token/hanp/hlen quad; since there is a dm_tokdata_t structure for
716 + each such quad, you can think of it as a one-to-one relationship between the
717 + lock and a dm_tokdata_t. Any application thread which presents the correct
718 + quad is entitled to grab or release the lock, or to use the rights
719 + associated with that lock. The thread that grabs the lock does not have to
720 + be the one to use the lock, nor does it have to be the thread which drops
721 + the lock. The lock can be held for very long periods of time, even across
722 + multiple systems calls by multiple application threads. The idea is that a
723 + coordinated group of DMAPI application threads can grab the lock, issue a
724 + series of inode accesses and/or updates, then drop the lock, and be assured
725 + that no other thread in the system could be modifying the inode at the same
726 + time. The kernel is expected to blindly trust that the application will
727 + not forget to unlock inodes it has locked, and will not deadlock itself
728 + against the kernel.
729 +
730 + There are two types of DMAPI rights, file object (inode) and filesystem
731 + object (superblock?). An inode right is the equivalent of the combination
732 + of both the XFS ilock and iolock; if held exclusively, no data or metadata
733 + within the file can be changed by non-lock-holding threads. The filesystem
734 + object lock is a little fuzzier; I think that if it is held, things like
735 + unmounts can be blocked, plus there is an event mask associated with the
736 + filesystem which can't be updated without the lock. (By the way, that
737 + event mask is supposed to be persistent in the superblock; add that to
738 + your worklist :-)
739 +
740 + All events generated by XFS currently arrive with no rights, i.e.
741 + DM_RIGHT_NULL, and return to the filesystem with no rights. It would be
742 + smart to leave it this way if possible, because it otherwise becomes more
743 + likely that an application thread will deadlock against the kernel if the
744 + one responsible for calling dm_get_events() happens to touch a file which
745 + was locked at the time the event was queued. Since the thread is blocked,
746 + it can't read the event in order to find and drop the lock. Catch-22. If
747 + you do have events that arrive with non-null rights, then dm_enqueue() needs
748 + to have code added for synchronous events which atomically switches the
749 + right from being a thread-based right to a dm_tokdata_t-based right without
750 + allowing the lock to drop in between. You will probably have to add a new
751 + dm_fsys_vector entry point to do this. The lock can't be lost during the
752 + switch, or other threads might change the inode or superblock in between.
753 + Likewise, if you need to return to the filesystem holding a right, then
754 + you need a DMAPI-to-thread atomic switch to occur, most likely in
755 + dm_change_right(). Again, the lock must not be lost during the switch; the
756 + DMAPI spec spends a couple of pages stressing this. Another dm_fsys_vector
757 + entry point is probably the answer.
758 +
759 + There are several assumptions implied in the current layout of the code.
760 + First of all, if an event returns to the filesystem with a return value of
761 + zero, then the filesystem can assume that any locks (rights) held at the
762 + start of the event are still in effect at the end of the event. (Note that
763 +   the application could have temporarily dropped and reacquired the right
764 + while the event was outstanding, however). If the event returns to the
765 + filesystem with an errno, then the filesystem must assume that it has lost
766 + any and all rights associated with any of the objects in the event. This
767 + was done for a couple of reasons. First of all, since an errno is being
768 + returned, most likely the filesystem is going to immediately drop all the
769 + locks anyway. If the DMAPI code was required to unconditionally reobtain
770 + all locks before returning to the filesystem, then dm_pending() wouldn't
771 + work for NFS server threads because the process would block indefinitely
772 + trying to get its thread-based rights back, because the DMAPI-rights
773 + associated with the dm_tokdata_t in the outstanding event would prevent
774 + the rights from being obtained. That would be a bad thing. We wouldn't
775 + be able to let users Cntl-C out of read/write/truncate events either.
776 +
777 + If a case should ever surface where the thread has lost its rights even
778 + though it has a zero return status, or where the thread has rights even
779 + though it is returning with an errno, then this logic will have to be
780 + reworked. This could be done by changing the 'right' parameters on all
781 + the event calls to (dm_right_t *), so that they could serve both as IN
782 + and OUT parameters.
783 +
784 + Some events such as DM_EVENT_DESTROY arrive without holding an inode
785 + reference; if you don't have an inode reference, you can't have a right
786 + on the file.
787 +
788 + One more quirk. The DM_EVENT_UNMOUNT event is defined to be synchronous
789 +   when its behavior is asynchronous. If an unmount event arrives with
790 + rights, the event should return with the same rights and should NOT leave
791 + any rights in the dm_tokdata_t where the application could use them.
792 +*/
793 +
794 +
795 +#define GETNEXTOFF(vdat) ((vdat).vd_offset + (vdat).vd_length)
796 +#define HANDLE_SIZE(tdp) \
797 + ((tdp)->td_type & DM_TDT_VFS ? DM_FSHSIZE : DM_HSIZE((tdp)->td_handle))
798 +
799 +
800 +/* Given an inode pointer in a filesystem known to support DMAPI,
801 + build a tdp structure for the corresponding inode.
802 +*/
803 +
804 +static dm_tokdata_t *
805 +dm_ip_data(
806 + struct inode *ip,
807 + dm_right_t right,
808 + int referenced) /* != 0, caller holds inode reference */
809 +{
810 + int error;
811 + dm_tokdata_t *tdp;
812 + int filetype;
813 +
814 + tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
815 + if (tdp == NULL) {
816 + printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
817 + return NULL;
818 + }
819 +
820 + tdp->td_next = NULL;
821 + tdp->td_tevp = NULL;
822 + tdp->td_app_ref = 0;
823 + tdp->td_orig_right = right;
824 + tdp->td_right = right;
825 + tdp->td_flags = DM_TDF_ORIG;
826 + if (referenced) {
827 + tdp->td_flags |= DM_TDF_EVTREF;
828 + }
829 +
830 + filetype = ip->i_mode & S_IFMT;
831 + if (filetype == S_IFREG) {
832 + tdp->td_type = DM_TDT_REG;
833 + } else if (filetype == S_IFDIR) {
834 + tdp->td_type = DM_TDT_DIR;
835 + } else if (filetype == S_IFLNK) {
836 + tdp->td_type = DM_TDT_LNK;
837 + } else {
838 + tdp->td_type = DM_TDT_OTH;
839 + }
840 +
841 + if (referenced) {
842 + tdp->td_ip = ip;
843 + } else {
844 + tdp->td_ip = NULL;
845 + }
846 + tdp->td_vcount = 0;
847 +
848 + if ((error = dm_ip_to_handle(ip, &tdp->td_handle)) != 0) {
849 + panic("dm_ip_data: dm_ip_to_handle failed for ip %p in "
850 + "a DMAPI filesystem, errno %d\n", ip, error);
851 + }
852 +
853 + return(tdp);
854 +}
855 +
856 +
857 +/* Given a sb pointer to a filesystem known to support DMAPI, build a tdp
858 + structure for that sb.
859 +*/
860 +static dm_tokdata_t *
861 +dm_sb_data(
862 + struct super_block *sb,
863 + struct inode *ip, /* will be NULL for DM_EVENT_UNMOUNT */
864 + dm_right_t right)
865 +{
866 + dm_tokdata_t *tdp;
867 + struct filesystem_dmapi_operations *dops;
868 + dm_fsid_t fsid;
869 +
870 + dops = dm_fsys_ops(sb);
871 + ASSERT(dops);
872 + dops->get_fsid(sb, &fsid);
873 +
874 + tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
875 + if (tdp == NULL) {
876 + printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
877 + return NULL;
878 + }
879 +
880 + tdp->td_next = NULL;
881 + tdp->td_tevp = NULL;
882 + tdp->td_app_ref = 0;
883 + tdp->td_orig_right = right;
884 + tdp->td_right = right;
885 + tdp->td_flags = DM_TDF_ORIG;
886 + if (ip) {
887 + tdp->td_flags |= DM_TDF_EVTREF;
888 + }
889 + tdp->td_type = DM_TDT_VFS;
890 + tdp->td_ip = ip;
891 + tdp->td_vcount = 0;
892 +
893 + memcpy(&tdp->td_handle.ha_fsid, &fsid, sizeof(fsid));
894 + memset((char *)&tdp->td_handle.ha_fsid + sizeof(fsid), 0,
895 + sizeof(tdp->td_handle) - sizeof(fsid));
896 +
897 + return(tdp);
898 +}
899 +
900 +
901 +/* Link a tdp structure into the tevp. */
902 +
903 +static void
904 +dm_add_handle_to_event(
905 + dm_tokevent_t *tevp,
906 + dm_tokdata_t *tdp)
907 +{
908 + tdp->td_next = tevp->te_tdp;
909 + tevp->te_tdp = tdp;
910 + tdp->td_tevp = tevp;
911 +}
912 +
913 +
914 +/* Generate the given data event for the inode, and wait for a reply. The
915 + caller must guarantee that the inode's reference count is greater than zero
916 + so that the filesystem can't disappear while the request is outstanding.
917 +*/
918 +
919 +int
920 +dm_send_data_event(
921 + dm_eventtype_t event,
922 + struct inode *ip,
923 + dm_right_t vp_right, /* current right for ip */
924 + dm_off_t offset,
925 + size_t length,
926 + int flags) /* 0 or DM_FLAGS_NDELAY */
927 +{
928 + dm_data_event_t *datap;
929 + dm_tokevent_t *tevp;
930 + dm_tokdata_t *tdp;
931 + int error;
932 +
933 + tdp = dm_ip_data(ip, vp_right, /* reference held */ 1);
934 + if (tdp == NULL)
935 + return -ENOMEM;
936 +
937 + /* Calculate the size of the event in bytes, create an event structure
938 + for it, and insert the file's handle into the event.
939 + */
940 +
941 + tevp = dm_evt_create_tevp(event, HANDLE_SIZE(tdp), (void **)&datap);
942 + if (tevp == NULL) {
943 + kmem_cache_free(dm_tokdata_cachep, tdp);
944 + return(-ENOMEM);
945 + }
946 + dm_add_handle_to_event(tevp, tdp);
947 +
948 + /* Now fill in all the dm_data_event_t fields. */
949 +
950 + datap->de_handle.vd_offset = sizeof(*datap);
951 + datap->de_handle.vd_length = HANDLE_SIZE(tdp);
952 + memcpy((char *)datap + datap->de_handle.vd_offset, &tdp->td_handle,
953 + datap->de_handle.vd_length);
954 + datap->de_offset = offset;
955 + datap->de_length = length;
956 +
957 + /* Queue the message and wait for the reply. */
958 +
959 + error = dm_enqueue_normal_event(ip->i_sb, &tevp, flags);
960 +
961 + /* If no errors occurred, we must leave with the same rights we had
962 + upon entry. If errors occurred, we must leave with no rights.
963 + */
964 +
965 + dm_evt_rele_tevp(tevp, error);
966 +
967 + return(error);
968 +}
969 +
970 +
971 +/* Generate the destroy event for the inode and wait until the request has been
972 + queued. The caller does not hold an inode reference or a right on the inode,
973 + but it must otherwise lock down the inode such that the filesystem can't
974 + disappear while the request is waiting to be queued. While waiting to be
975 + queued, the inode must not be referenceable either by path or by a call
976 + to dm_handle_to_ip().
977 +*/
978 +
979 +int
980 +dm_send_destroy_event(
981 + struct inode *ip,
982 + dm_right_t vp_right) /* always DM_RIGHT_NULL */
983 +{
984 + dm_fsys_vector_t *fsys_vector;
985 + dm_tokevent_t *tevp;
986 + dm_tokdata_t *tdp;
987 + dm_destroy_event_t *destp;
988 + dm_attrname_t attrname;
989 + char *value;
990 + int value_len;
991 + int error;
992 +
993 + tdp = dm_ip_data(ip, vp_right, /* no reference held */ 0);
994 + if (tdp == NULL)
995 + return -ENOMEM;
996 +
997 + if ((error = dm_waitfor_destroy_attrname(ip->i_sb, &attrname)) != 0)
998 + return(error);
999 +
1000 + /* If a return-on-destroy attribute name exists for this filesystem,
1001 + see if the object being deleted has this attribute. If the object
1002 + doesn't have the attribute or if we encounter an error, then send
1003 + the event without the attribute.
1004 + */
1005 +
1006 + value_len = -1; /* because zero is a valid attribute length */
1007 + if (attrname.an_chars[0] != '\0') {
1008 + fsys_vector = dm_fsys_vector(ip);
1009 + error = fsys_vector->get_destroy_dmattr(ip, vp_right, &attrname,
1010 + &value, &value_len);
1011 + if (error && error != -ENODATA)
1012 + return error;
1013 + }
1014 +
1015 + /* Now that we know the size of the attribute value, if any, calculate
1016 + the size of the event in bytes, create an event structure for it,
1017 + and insert the handle into the event.
1018 + */
1019 +
1020 + tevp = dm_evt_create_tevp(DM_EVENT_DESTROY,
1021 + HANDLE_SIZE(tdp) + (value_len >= 0 ? value_len : 0),
1022 + (void **)&destp);
1023 + if (tevp == NULL) {
1024 + kmem_cache_free(dm_tokdata_cachep, tdp);
1025 + if (value_len > 0)
1026 + kfree(value);
1027 + return(-ENOMEM);
1028 + }
1029 + dm_add_handle_to_event(tevp, tdp);
1030 +
1031 + /* Now fill in all the dm_destroy_event_t fields. */
1032 +
1033 + destp->ds_handle.vd_offset = sizeof(*destp);
1034 + destp->ds_handle.vd_length = HANDLE_SIZE(tdp);
1035 + memcpy((char *)destp + destp->ds_handle.vd_offset, &tdp->td_handle,
1036 + destp->ds_handle.vd_length);
1037 + if (value_len >= 0) {
1038 + destp->ds_attrname = attrname;
1039 + destp->ds_attrcopy.vd_length = value_len;
1040 + if (value_len == 0) {
1041 + destp->ds_attrcopy.vd_offset = 0;
1042 + } else {
1043 + destp->ds_attrcopy.vd_offset = GETNEXTOFF(destp->ds_handle);
1044 + memcpy((char *)destp + destp->ds_attrcopy.vd_offset, value,
1045 + value_len);
1046 + kfree(value);
1047 + }
1048 + }
1049 +
1050 + /* Queue the message asynchronously. */
1051 +
1052 + error = dm_enqueue_normal_event(ip->i_sb, &tevp, 0);
1053 +
1054 + /* Since we had no rights upon entry, we have none to reobtain before
1055 + leaving.
1056 + */
1057 +
1058 + dm_evt_rele_tevp(tevp, 1);
1059 +
1060 + return(error);
1061 +}
1062 +
1063 +
1064 +/* The dm_mount_event_t event is sent in turn to all sessions that have asked
1065 + for it until one either rejects it or accepts it. The filesystem is not
1066 + going anywhere because the mount is blocked until the event is answered.
1067 +*/
1068 +
1069 +int
1070 +dm_send_mount_event(
1071 + struct super_block *sb, /* filesystem being mounted */
1072 + dm_right_t vfsp_right,
1073 + struct inode *ip, /* mounted on directory */
1074 + dm_right_t vp_right,
1075 + struct inode *rootip,
1076 + dm_right_t rootvp_right,
1077 + char *name1, /* mount path */
1078 + char *name2) /* filesystem device name */
1079 +{
1080 + int error;
1081 + dm_tokevent_t *tevp = NULL;
1082 + dm_tokdata_t *tdp1 = NULL; /* filesystem handle for event */
1083 + dm_tokdata_t *tdp2 = NULL; /* file handle for mounted-on dir. */
1084 + dm_tokdata_t *tdp3 = NULL; /* file handle for root inode */
1085 + dm_mount_event_t *mp;
1086 + size_t nextoff;
1087 +
1088 + /* Convert the sb to a filesystem handle, and ip and rootip into
1089 + file handles. ip (the mounted-on directory) may not have a handle
1090 + if it is a different filesystem type which does not support DMAPI.
1091 + */
1092 +
1093 + tdp1 = dm_sb_data(sb, rootip, vfsp_right);
1094 + if (tdp1 == NULL)
1095 + goto out_nomem;
1096 +
1097 + if ((ip == NULL) || dm_check_dmapi_ip(ip)) {
1098 + ip = NULL; /* we are mounting on non-DMAPI FS */
1099 + } else {
1100 + tdp2 = dm_ip_data(ip, vp_right, /* reference held */ 1);
1101 + if (tdp2 == NULL)
1102 + goto out_nomem;
1103 + }
1104 +
1105 + tdp3 = dm_ip_data(rootip, rootvp_right, /* reference held */ 1);
1106 + if (tdp3 == NULL)
1107 + goto out_nomem;
1108 +
1109 + /* Calculate the size of the event in bytes, create an event structure
1110 + for it, and insert the handles into the event.
1111 + */
1112 +
1113 + tevp = dm_evt_create_tevp(DM_EVENT_MOUNT,
1114 + HANDLE_SIZE(tdp1) + (ip ? HANDLE_SIZE(tdp2) : 0) +
1115 + HANDLE_SIZE(tdp3) + strlen(name1) + 1 +
1116 + strlen(name2) + 1, (void **)&mp);
1117 + if (tevp == NULL)
1118 + goto out_nomem;
1119 +
1120 + dm_add_handle_to_event(tevp, tdp1);
1121 + if (ip)
1122 + dm_add_handle_to_event(tevp, tdp2);
1123 + dm_add_handle_to_event(tevp, tdp3);
1124 +
1125 + /* Now fill in all the dm_mount_event_t fields. */
1126 +
1127 + mp->me_handle1.vd_offset = sizeof(*mp);
1128 + mp->me_handle1.vd_length = HANDLE_SIZE(tdp1);
1129 + memcpy((char *) mp + mp->me_handle1.vd_offset, &tdp1->td_handle,
1130 + mp->me_handle1.vd_length);
1131 + nextoff = GETNEXTOFF(mp->me_handle1);
1132 +
1133 + if (ip) {
1134 + mp->me_handle2.vd_offset = nextoff;
1135 + mp->me_handle2.vd_length = HANDLE_SIZE(tdp2);
1136 + memcpy((char *)mp + mp->me_handle2.vd_offset, &tdp2->td_handle,
1137 + mp->me_handle2.vd_length);
1138 + nextoff = GETNEXTOFF(mp->me_handle2);
1139 + }
1140 +
1141 + mp->me_name1.vd_offset = nextoff;
1142 + mp->me_name1.vd_length = strlen(name1) + 1;
1143 + memcpy((char *)mp + mp->me_name1.vd_offset, name1, mp->me_name1.vd_length);
1144 + nextoff = GETNEXTOFF(mp->me_name1);
1145 +
1146 + mp->me_name2.vd_offset = nextoff;
1147 + mp->me_name2.vd_length = strlen(name2) + 1;
1148 + memcpy((char *)mp + mp->me_name2.vd_offset, name2, mp->me_name2.vd_length);
1149 + nextoff = GETNEXTOFF(mp->me_name2);
1150 +
1151 + mp->me_roothandle.vd_offset = nextoff;
1152 + mp->me_roothandle.vd_length = HANDLE_SIZE(tdp3);
1153 + memcpy((char *)mp + mp->me_roothandle.vd_offset, &tdp3->td_handle,
1154 + mp->me_roothandle.vd_length);
1155 +
1156 + mp->me_mode = (sb->s_flags & MS_RDONLY ? DM_MOUNT_RDONLY : 0);
1157 +
1158 + /* Queue the message and wait for the reply. */
1159 +
1160 + error = dm_enqueue_mount_event(sb, tevp);
1161 +
1162 + /* If no errors occurred, we must leave with the same rights we had
1163 + upon entry. If errors occurred, we must leave with no rights.
1164 + */
1165 +
1166 + dm_evt_rele_tevp(tevp, error);
1167 +
1168 + return(error);
1169 +
1170 +out_nomem:
1171 + if (tevp)
1172 + kfree(tevp);
1173 + if (tdp1)
1174 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1175 + if (tdp2)
1176 + kmem_cache_free(dm_tokdata_cachep, tdp2);
1177 + if (tdp3)
1178 + kmem_cache_free(dm_tokdata_cachep, tdp3);
1179 + return -ENOMEM;
1180 +}
1181 +
1182 +
1183 +/* Generate a DM_EVENT_UNMOUNT event and wait for a reply. The 'retcode'
1184 + field indicates whether this is a successful or unsuccessful unmount.
1185 + If successful, the filesystem is already unmounted, and any pending handle
1186 + reference to the filesystem will be failed. If the unmount was
1187 + unsuccessful, then the filesystem will be placed back into full service.
1188 +
1189 + The DM_EVENT_UNMOUNT event should really be asynchronous, because the
1190 + application has no control over whether or not the unmount succeeds. (The
1191 + DMAPI spec defined it that way because asynchronous events aren't always
1192 + guaranteed to be delivered.)
1193 +
1194 + Since the filesystem is already unmounted in the successful case, the
1195 + DM_EVENT_UNMOUNT event can't make available any inode to be used in
1196 + subsequent sid/hanp/hlen/token calls by the application. The event will
1197 + hang around until the application does a DM_RESP_CONTINUE, but the handle
1198 + within the event is unusable by the application.
1199 +*/
1200 +
1201 +void
1202 +dm_send_unmount_event(
1203 + struct super_block *sb,
1204 + struct inode *ip, /* NULL if unmount successful */
1205 + dm_right_t vfsp_right,
1206 + mode_t mode,
1207 + int retcode, /* errno, if unmount failed */
1208 + int flags)
1209 +{
1210 + dm_namesp_event_t *np;
1211 + dm_tokevent_t *tevp;
1212 + dm_tokdata_t *tdp1;
1213 +
1214 + /* If the unmount failed, put the filesystem back into full service,
1215 + allowing blocked handle references to finish. If it succeeded, put
1216 + the filesystem into the DM_STATE_UNMOUNTED state and fail all
1217 + blocked DM_NO_TOKEN handle accesses.
1218 + */
1219 +
1220 + if (retcode != 0) { /* unmount was unsuccessful */
1221 + dm_change_fsys_entry(sb, DM_STATE_MOUNTED);
1222 + } else {
1223 + dm_change_fsys_entry(sb, DM_STATE_UNMOUNTED);
1224 + }
1225 +
1226 + /* If the event wasn't in the filesystem dm_eventset_t, just remove
1227 + the filesystem from the list of DMAPI filesystems and return.
1228 + */
1229 +
1230 + if (flags & DM_FLAGS_UNWANTED) {
1231 + if (retcode == 0)
1232 + dm_remove_fsys_entry(sb);
1233 + return;
1234 + }
1235 +
1236 + /* Calculate the size of the event in bytes and allocate zeroed memory
1237 + for it.
1238 + */
1239 +
1240 + tdp1 = dm_sb_data(sb, ip, vfsp_right);
1241 + if (tdp1 == NULL)
1242 + return;
1243 +
1244 + tevp = dm_evt_create_tevp(DM_EVENT_UNMOUNT, HANDLE_SIZE(tdp1),
1245 + (void **)&np);
1246 + if (tevp == NULL) {
1247 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1248 + return;
1249 + }
1250 +
1251 + dm_add_handle_to_event(tevp, tdp1);
1252 +
1253 + /* Now copy in all the dm_namesp_event_t specific fields. */
1254 +
1255 + np->ne_handle1.vd_offset = sizeof(*np);
1256 + np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
1257 + memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
1258 + np->ne_handle1.vd_length);
1259 + np->ne_mode = mode;
1260 + np->ne_retcode = retcode;
1261 +
1262 + /* Since DM_EVENT_UNMOUNT is effectively asynchronous, queue the
1263 + message and ignore any error return for DM_EVENT_UNMOUNT.
1264 + */
1265 +
1266 + (void)dm_enqueue_normal_event(sb, &tevp, flags);
1267 +
1268 + if (retcode == 0)
1269 + dm_remove_fsys_entry(sb);
1270 +
1271 + dm_evt_rele_tevp(tevp, 0);
1272 +}
1273 +
1274 +
1275 +/* Generate the given namespace event and wait for a reply (if synchronous) or
1276 + until the event has been queued (asynchronous). The caller must guarantee
1277 + that at least one inode within the filesystem has had its reference count
1278 + bumped so that the filesystem can't disappear while the event is
1279 + outstanding.
1280 +*/
1281 +
1282 +int
1283 +dm_send_namesp_event(
1284 + dm_eventtype_t event,
1285 + struct super_block *sb, /* used by PREUNMOUNT */
1286 + struct inode *ip1,
1287 + dm_right_t vp1_right,
1288 + struct inode *ip2,
1289 + dm_right_t vp2_right,
1290 + const char *name1,
1291 + const char *name2,
1292 + mode_t mode,
1293 + int retcode,
1294 + int flags)
1295 +{
1296 + dm_namesp_event_t *np;
1297 + dm_tokevent_t *tevp;
1298 + dm_tokdata_t *tdp1 = NULL; /* primary handle for event */
1299 + dm_tokdata_t *tdp2 = NULL; /* additional handle for event */
1300 + size_t nextoff;
1301 + int error;
1302 +
1303 + if (sb == NULL)
1304 + sb = ip1->i_sb;
1305 +
1306 + switch (event) {
1307 + case DM_EVENT_PREUNMOUNT:
1308 + /*
1309 + * PREUNMOUNT - Send the file system handle in handle1,
1310 + * and the handle for the root dir in the second. Otherwise
1311 + * it's a normal sync message; i.e. succeeds or fails
1312 + * depending on the app's return code.
1313 + * ip1 and ip2 are both the root dir of mounted FS
1314 + * vp1_right is the filesystem right.
1315 + * vp2_right is the root inode right.
1316 + */
1317 +
1318 + if (flags & DM_FLAGS_UNWANTED) {
1319 + dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING);
1320 + return(0);
1321 + }
1322 + if (ip1 == NULL) {
1323 + /* If preunmount happens after kill_super then
1324 + * it's too late; there's nothing left with which
1325 + * to construct an event.
1326 + */
1327 + return(0);
1328 + }
1329 + tdp1 = dm_sb_data(sb, ip1, vp1_right);
1330 + if (tdp1 == NULL)
1331 + return -ENOMEM;
1332 + tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1);
1333 + if (tdp2 == NULL) {
1334 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1335 + return -ENOMEM;
1336 + }
1337 + break;
1338 +
1339 + case DM_EVENT_NOSPACE:
1340 + /* vp1_right is the filesystem right. */
1341 +
1342 + tdp1 = dm_sb_data(sb, ip1, vp1_right);
1343 + if (tdp1 == NULL)
1344 + return -ENOMEM;
1345 + tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1); /* additional info - not in the spec */
1346 + if (tdp2 == NULL) {
1347 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1348 + return -ENOMEM;
1349 + }
1350 + break;
1351 +
1352 + default:
1353 + /* All other events only pass in inodes and don't require any
1354 + special cases.
1355 + */
1356 +
1357 + tdp1 = dm_ip_data(ip1, vp1_right, /* reference held */ 1);
1358 + if (tdp1 == NULL)
1359 + return -ENOMEM;
1360 + if (ip2) {
1361 + tdp2 = dm_ip_data(ip2, vp2_right, /* reference held */ 1);
1362 + if (tdp2 == NULL) {
1363 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1364 + return -ENOMEM;
1365 + }
1366 + }
1367 + }
1368 +
1369 + /* Calculate the size of the event in bytes and allocate zeroed memory
1370 + for it.
1371 + */
1372 +
1373 + tevp = dm_evt_create_tevp(event,
1374 + HANDLE_SIZE(tdp1) + (ip2 ? HANDLE_SIZE(tdp2) : 0) +
1375 + (name1 ? strlen(name1) + 1 : 0) +
1376 + (name2 ? strlen(name2) + 1 : 0), (void **)&np);
1377 + if (tevp == NULL) {
1378 + if (tdp1)
1379 + kmem_cache_free(dm_tokdata_cachep, tdp1);
1380 + if (tdp2)
1381 + kmem_cache_free(dm_tokdata_cachep, tdp2);
1382 + return(-ENOMEM);
1383 + }
1384 +
1385 + dm_add_handle_to_event(tevp, tdp1);
1386 + if (ip2)
1387 + dm_add_handle_to_event(tevp, tdp2);
1388 +
1389 + /* Now copy in all the dm_namesp_event_t specific fields. */
1390 +
1391 + np->ne_handle1.vd_offset = sizeof(*np);
1392 + np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
1393 + memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
1394 + np->ne_handle1.vd_length);
1395 + nextoff = GETNEXTOFF(np->ne_handle1);
1396 + if (ip2) {
1397 + np->ne_handle2.vd_offset = nextoff;
1398 + np->ne_handle2.vd_length = HANDLE_SIZE(tdp2);
1399 + memcpy((char *)np + np->ne_handle2.vd_offset, &tdp2->td_handle,
1400 + np->ne_handle2.vd_length);
1401 + nextoff = GETNEXTOFF(np->ne_handle2);
1402 + }
1403 + if (name1) {
1404 + np->ne_name1.vd_offset = nextoff;
1405 + np->ne_name1.vd_length = strlen(name1) + 1;
1406 + memcpy((char *)np + np->ne_name1.vd_offset, name1,
1407 + np->ne_name1.vd_length);
1408 + nextoff = GETNEXTOFF(np->ne_name1);
1409 + }
1410 + if (name2) {
1411 + np->ne_name2.vd_offset = nextoff;
1412 + np->ne_name2.vd_length = strlen(name2) + 1;
1413 + memcpy((char *)np + np->ne_name2.vd_offset, name2,
1414 + np->ne_name2.vd_length);
1415 + }
1416 + np->ne_mode = mode;
1417 + np->ne_retcode = retcode;
1418 +
1419 + /* Queue the message and wait for the reply. */
1420 +
1421 + error = dm_enqueue_normal_event(sb, &tevp, flags);
1422 +
1423 + /* If no errors occurred, we must leave with the same rights we had
1424 + upon entry. If errors occurred, we must leave with no rights.
1425 + */
1426 +
1427 + dm_evt_rele_tevp(tevp, error);
1428 +
1429 + if (!error && event == DM_EVENT_PREUNMOUNT) {
1430 + dm_change_fsys_entry(sb, DM_STATE_UNMOUNTING);
1431 + }
1432 +
1433 + return(error);
1434 +}
1435 +
1436 +
1437 +/*
1438 + * Send a message of type "DM_EVENT_USER". Since no inode is involved, we
1439 + * don't have to worry about rights here.
1440 + */
1441 +
1442 +int
1443 +dm_send_msg(
1444 + dm_sessid_t targetsid,
1445 + dm_msgtype_t msgtype, /* SYNC or ASYNC */
1446 + size_t buflen,
1447 + void __user *bufp)
1448 +{
1449 + dm_tokevent_t *tevp;
1450 + int sync;
1451 + void *msgp;
1452 + int error;
1453 +
1454 + if (buflen > DM_MAX_MSG_DATA)
1455 + return(-E2BIG);
1456 + if (msgtype == DM_MSGTYPE_ASYNC) {
1457 + sync = 0;
1458 + } else if (msgtype == DM_MSGTYPE_SYNC) {
1459 + sync = 1;
1460 + } else {
1461 + return(-EINVAL);
1462 + }
1463 +
1464 + tevp = dm_evt_create_tevp(DM_EVENT_USER, buflen, (void **)&msgp);
1465 + if (tevp == NULL)
1466 + return -ENOMEM;
1467 +
1468 + if (buflen && copy_from_user(msgp, bufp, buflen)) {
1469 + dm_evt_rele_tevp(tevp, 0);
1470 + return(-EFAULT);
1471 + }
1472 +
1473 + /* Enqueue the request and wait for the reply. */
1474 +
1475 + error = dm_enqueue_sendmsg_event(targetsid, tevp, sync);
1476 +
1477 + /* Destroy the tevp and return the reply. (dm_pending is not
1478 + supported here.)
1479 + */
1480 +
1481 + dm_evt_rele_tevp(tevp, error);
1482 +
1483 + return(error);
1484 +}
1485 +
1486 +
1487 +/*
1488 + * Send a message of type "DM_EVENT_USER". Since no inode is involved, we
1489 + * don't have to worry about rights here.
1490 + */
1491 +
1492 +int
1493 +dm_create_userevent(
1494 + dm_sessid_t sid,
1495 + size_t msglen,
1496 + void __user *msgdatap,
1497 + dm_token_t __user *tokenp) /* return token created */
1498 +{
1499 + dm_tokevent_t *tevp;
1500 + dm_token_t token;
1501 + int error;
1502 + void *msgp;
1503 +
1504 + if (msglen > DM_MAX_MSG_DATA)
1505 + return(-E2BIG);
1506 +
1507 + tevp = dm_evt_create_tevp(DM_EVENT_USER, msglen, (void **)&msgp);
1508 + if (tevp == NULL)
1509 + return(-ENOMEM);
1510 +
1511 + if (msglen && copy_from_user(msgp, msgdatap, msglen)) {
1512 + dm_evt_rele_tevp(tevp, 0);
1513 + return(-EFAULT);
1514 + }
1515 +
1516 + /* Queue the message. If that didn't work, free the tevp structure. */
1517 +
1518 + if ((error = dm_enqueue_user_event(sid, tevp, &token)) != 0)
1519 + dm_evt_rele_tevp(tevp, 0);
1520 +
1521 + if (!error && copy_to_user(tokenp, &token, sizeof(token)))
1522 + error = -EFAULT;
1523 +
1524 + return(error);
1525 +}
1526 Index: linux-2.6.26/fs/dmapi/dmapi.h
1527 ===================================================================
1528 --- /dev/null
1529 +++ linux-2.6.26/fs/dmapi/dmapi.h
1530 @@ -0,0 +1,1086 @@
1531 +/*
1532 + * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved.
1533 + *
1534 + * This program is free software; you can redistribute it and/or modify it
1535 + * under the terms of version 2.1 of the GNU Lesser General Public License
1536 + * as published by the Free Software Foundation.
1537 + *
1538 + * This program is distributed in the hope that it would be useful, but
1539 + * WITHOUT ANY WARRANTY; without even the implied warranty of
1540 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
1541 + *
1542 + * Further, this software is distributed without any warranty that it is
1543 + * free of the rightful claim of any third person regarding infringement
1544 + * or the like. Any license provided herein, whether implied or
1545 + * otherwise, applies only to this software file. Patent licenses, if
1546 + * any, provided herein do not apply to combinations of this program with
1547 + * other software, or any other product whatsoever.
1548 + *
1549 + * You should have received a copy of the GNU Lesser General Public
1550 + * License along with this program; if not, write the Free Software
1551 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
1552 + * USA.
1553 + *
1554 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
1555 + * Mountain View, CA 94043, or:
1556 + *
1557 + * http://www.sgi.com
1558 + *
1559 + * For further information regarding this notice, see:
1560 + *
1561 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
1562 + */
1563 +
1564 +#ifndef __DMAPI_H__
1565 +#define __DMAPI_H__
1566 +
1567 +#ifdef __cplusplus
1568 +extern "C" {
1569 +#endif
1570 +
1571 +#ifndef __KERNEL__
1572 +#include <sys/types.h>
1573 +#endif
1574 +#include <linux/types.h>
1575 +
1576 +#ifndef __user
1577 +#define __user
1578 +#endif
1579 +
1580 +/**************************************************************************
1581 + * *
1582 + * The SGI implementation of DMAPI is based upon the X/Open document *
1583 + * Systems Management: Data Storage Management (XDSM) API *
1584 + * dated February 1997. Not all DMAPI functions and structure fields *
1585 + * have been implemented. Most importantly, the DMAPI functions *
1586 + * dm_request_right, dm_release_right, dm_query_right, dm_upgrade_right *
1587 + * and dm_downgrade_right do not work as described in the specification. *
1588 + * *
1589 + * The XFS filesystem currently does not allow its locking mechanisms to *
1590 + * be externally accessed from user space. While the above-mentioned *
1591 + * dm_xxx_right functions exist and can be called by applications, they *
1592 + * always return successfully without actually obtaining any locks *
1593 + * within the filesystem. *
1594 + * *
1595 + * Applications which do not need full rights support and which only *
1596 + * make dm_xxx_right calls in order to satisfy the input requirements of *
1597 + * other DMAPI calls should be able to use these routines to avoid *
1598 + * having to implement special-case code for SGI platforms. Applications *
1599 + * which truly need the capabilities of a full implementation of rights *
1600 + * will unfortunately have to come up with alternate software solutions *
1601 + * until such time as rights can be completely implemented. *
1602 + * *
1603 + * Functions and structure fields defined within this file which are not *
1604 + * supported in the SGI implementation of DMAPI are indicated by comments *
1605 + * following their definitions such as "not supported", or "not *
1606 + * completely supported". Any function or field not so marked may be *
1607 + * assumed to work exactly according to the spec. *
1608 + * *
1609 + **************************************************************************/
1610 +
1611 +
1612 +
1613 +/* The first portion of this file contains defines and typedefs that are
1614 + DMAPI implementation-dependent, and could be different on other platforms.
1615 +*/
1616 +
1617 +typedef __s64 dm_attrloc_t;
1618 +typedef unsigned int dm_boolean_t;
1619 +typedef __u64 dm_eventset_t;
1620 +typedef __u64 dm_fsid_t;
1621 +typedef __u64 dm_ino_t;
1622 +typedef __u32 dm_igen_t;
1623 +typedef __s64 dm_off_t;
1624 +typedef unsigned int dm_sequence_t;
1625 +typedef int dm_sessid_t;
1626 +typedef __u64 dm_size_t;
1627 +typedef __s64 dm_ssize_t;
1628 +typedef int dm_token_t;
1629 +
1630 +/* XXX dev_t, mode_t, and nlink_t are not the same size in kernel space
1631 + and user space. This affects the field offsets for dm_stat_t.
1632 + The following solution is temporary.
1633 +
1634 + user space sizes: dev_t=8 mode_t=4 nlink_t=4
1635 + kernel space : dev_t=2 mode_t=2 nlink_t=2
1636 +
1637 +*/
1638 +typedef __s64 dm_dev_t;
1639 +typedef int dm_mode_t;
1640 +typedef int dm_nlink_t;
1641 +
1642 +
1643 +#define DM_REGION_NOEVENT 0x0
1644 +#define DM_REGION_READ 0x1
1645 +#define DM_REGION_WRITE 0x2
1646 +#define DM_REGION_TRUNCATE 0x4
1647 +
1648 +/* Values for the mask argument used with dm_get_fileattr, dm_get_bulkattr,
1649 + dm_get_dirattrs, and dm_set_fileattr.
1650 +*/
1651 +
1652 +#define DM_AT_MODE 0x0001
1653 +#define DM_AT_UID 0x0002
1654 +#define DM_AT_GID 0x0004
1655 +#define DM_AT_ATIME 0x0008
1656 +#define DM_AT_MTIME 0x0010
1657 +#define DM_AT_CTIME 0x0020
1658 +#define DM_AT_SIZE 0x0040
1659 +#define DM_AT_DTIME 0x0080
1660 +#define DM_AT_HANDLE 0x0100
1661 +#define DM_AT_EMASK 0x0200
1662 +#define DM_AT_PMANR 0x0400
1663 +#define DM_AT_PATTR 0x0800
1664 +#define DM_AT_STAT 0x1000
1665 +#define DM_AT_CFLAG 0x2000
1666 +
1667 +#define DM_EV_WAIT 0x1 /* used in dm_get_events() */
1668 +
1669 +#define DM_MOUNT_RDONLY 0x1 /* me_mode field in dm_mount_event_t */
1670 +
1671 +#define DM_RR_WAIT 0x1
1672 +
1673 +#define DM_UNMOUNT_FORCE 0x1 /* ne_mode field in dm_namesp_event_t */
1674 +
1675 +#define DM_WRITE_SYNC 0x1 /* used in dm_write_invis() */
1676 +
1677 +#define DM_SESSION_INFO_LEN 256
1678 +#define DM_NO_SESSION 0
1679 +#define DM_TRUE 1
1680 +#define DM_FALSE 0
1681 +#define DM_INVALID_TOKEN 0
1682 +#define DM_NO_TOKEN (-1)
1683 +#define DM_INVALID_HANP NULL
1684 +#define DM_INVALID_HLEN 0
1685 +#define DM_GLOBAL_HANP ((void *)(1LL))
1686 +#define DM_GLOBAL_HLEN ((size_t)(1))
1687 +#define DM_VER_STR_CONTENTS "SGI DMAPI (XDSM) API, Release 1.1."
1688 +
1689 +
1690 +#define DMEV_SET(event_type, event_list) \
1691 + ((event_list) |= (1 << (event_type)))
1692 +#define DMEV_CLR(event_type, event_list) \
1693 + ((event_list) &= ~(1 << (event_type)))
1694 +#define DMEV_ISSET(event_type, event_list) \
1695 + (int)(((event_list) & (1 << (event_type))) != 0)
1696 +#define DMEV_ZERO(event_list) \
1697 + (event_list) = 0
1698 +
1699 +
1700 +typedef struct {
1701 + int vd_offset; /* offset from start of containing struct */
1702 + unsigned int vd_length; /* length of data starting at vd_offset */
1703 +} dm_vardata_t;
1704 +
1705 +#define DM_GET_VALUE(p, field, type) \
1706 + ((type) ((char *)(p) + (p)->field.vd_offset))
1707 +
1708 +#define DM_GET_LEN(p, field) \
1709 + ((p)->field.vd_length)
1710 +
1711 +#define DM_STEP_TO_NEXT(p, type) \
1712 + ((type) ((p)->_link ? (char *)(p) + (p)->_link : NULL))
1713 +
1714 +
1715 +
1716 +
1717 +/* The remainder of this include file contains defines, typedefs, and
1718 + structures which are strictly defined by the DMAPI 2.3 specification.
1719 +
1720 + (The _link field which appears in several structures is an
1721 + implementation-specific way to implement DM_STEP_TO_NEXT, and
1722 + should not be referenced directly by application code.)
1723 +*/
1724 +
1725 +
1726 +#define DM_ATTR_NAME_SIZE 8
1727 +
1728 +
1729 +struct dm_attrname {
1730 + unsigned char an_chars[DM_ATTR_NAME_SIZE];
1731 +};
1732 +typedef struct dm_attrname dm_attrname_t;
1733 +
1734 +
1735 +struct dm_attrlist {
1736 + int _link;
1737 + dm_attrname_t al_name;
1738 + dm_vardata_t al_data;
1739 +};
1740 +typedef struct dm_attrlist dm_attrlist_t;
1741 +
1742 +
1743 +typedef enum {
1744 + DM_CONFIG_INVALID,
1745 + DM_CONFIG_BULKALL,
1746 + DM_CONFIG_CREATE_BY_HANDLE,
1747 + DM_CONFIG_DTIME_OVERLOAD,
1748 + DM_CONFIG_LEGACY,
1749 + DM_CONFIG_LOCK_UPGRADE,
1750 + DM_CONFIG_MAX_ATTR_ON_DESTROY,
1751 + DM_CONFIG_MAX_ATTRIBUTE_SIZE,
1752 + DM_CONFIG_MAX_HANDLE_SIZE,
1753 + DM_CONFIG_MAX_MANAGED_REGIONS,
1754 + DM_CONFIG_MAX_MESSAGE_DATA,
1755 + DM_CONFIG_OBJ_REF,
1756 + DM_CONFIG_PENDING,
1757 + DM_CONFIG_PERS_ATTRIBUTES,
1758 + DM_CONFIG_PERS_EVENTS,
1759 + DM_CONFIG_PERS_INHERIT_ATTRIBS,
1760 + DM_CONFIG_PERS_MANAGED_REGIONS,
1761 + DM_CONFIG_PUNCH_HOLE,
1762 + DM_CONFIG_TOTAL_ATTRIBUTE_SPACE,
1763 + DM_CONFIG_WILL_RETRY
1764 +} dm_config_t;
1765 +
1766 +
1767 +struct dm_dioinfo { /* non-standard SGI addition */
1768 + unsigned int d_mem;
1769 + unsigned int d_miniosz;
1770 + unsigned int d_maxiosz;
1771 + dm_boolean_t d_dio_only;
1772 +};
1773 +typedef struct dm_dioinfo dm_dioinfo_t;
1774 +
1775 +
1776 +struct dm_dispinfo {
1777 + int _link;
1778 + unsigned int di_pad1; /* reserved; do not reference */
1779 + dm_vardata_t di_fshandle;
1780 + dm_eventset_t di_eventset;
1781 +};
1782 +typedef struct dm_dispinfo dm_dispinfo_t;
1783 +
1784 +
1785 +#ifndef HAVE_DM_EVENTTYPE_T
1786 +#define HAVE_DM_EVENTTYPE_T
1787 +typedef enum {
1788 + DM_EVENT_INVALID = -1,
1789 + DM_EVENT_CANCEL = 0, /* not supported */
1790 + DM_EVENT_MOUNT = 1,
1791 + DM_EVENT_PREUNMOUNT = 2,
1792 + DM_EVENT_UNMOUNT = 3,
1793 + DM_EVENT_DEBUT = 4, /* not supported */
1794 + DM_EVENT_CREATE = 5,
1795 + DM_EVENT_CLOSE = 6, /* not supported */
1796 + DM_EVENT_POSTCREATE = 7,
1797 + DM_EVENT_REMOVE = 8,
1798 + DM_EVENT_POSTREMOVE = 9,
1799 + DM_EVENT_RENAME = 10,
1800 + DM_EVENT_POSTRENAME = 11,
1801 + DM_EVENT_LINK = 12,
1802 + DM_EVENT_POSTLINK = 13,
1803 + DM_EVENT_SYMLINK = 14,
1804 + DM_EVENT_POSTSYMLINK = 15,
1805 + DM_EVENT_READ = 16,
1806 + DM_EVENT_WRITE = 17,
1807 + DM_EVENT_TRUNCATE = 18,
1808 + DM_EVENT_ATTRIBUTE = 19,
1809 + DM_EVENT_DESTROY = 20,
1810 + DM_EVENT_NOSPACE = 21,
1811 + DM_EVENT_USER = 22,
1812 + DM_EVENT_MAX = 23
1813 +} dm_eventtype_t;
1814 +#endif
1815 +
1816 +
1817 +struct dm_eventmsg {
1818 + int _link;
1819 + dm_eventtype_t ev_type;
1820 + dm_token_t ev_token;
1821 + dm_sequence_t ev_sequence;
1822 + dm_vardata_t ev_data;
1823 +};
1824 +typedef struct dm_eventmsg dm_eventmsg_t;
1825 +
1826 +
1827 +struct dm_cancel_event { /* not supported */
1828 + dm_sequence_t ce_sequence;
1829 + dm_token_t ce_token;
1830 +};
1831 +typedef struct dm_cancel_event dm_cancel_event_t;
1832 +
1833 +
1834 +struct dm_data_event {
1835 + dm_vardata_t de_handle;
1836 + dm_off_t de_offset;
1837 + dm_size_t de_length;
1838 +};
1839 +typedef struct dm_data_event dm_data_event_t;
1840 +
1841 +struct dm_destroy_event {
1842 + dm_vardata_t ds_handle;
1843 + dm_attrname_t ds_attrname;
1844 + dm_vardata_t ds_attrcopy;
1845 +};
1846 +typedef struct dm_destroy_event dm_destroy_event_t;
1847 +
1848 +struct dm_mount_event {
1849 + dm_mode_t me_mode;
1850 + dm_vardata_t me_handle1;
1851 + dm_vardata_t me_handle2;
1852 + dm_vardata_t me_name1;
1853 + dm_vardata_t me_name2;
1854 + dm_vardata_t me_roothandle;
1855 +};
1856 +typedef struct dm_mount_event dm_mount_event_t;
1857 +
1858 +struct dm_namesp_event {
1859 + dm_mode_t ne_mode;
1860 + dm_vardata_t ne_handle1;
1861 + dm_vardata_t ne_handle2;
1862 + dm_vardata_t ne_name1;
1863 + dm_vardata_t ne_name2;
1864 + int ne_retcode;
1865 +};
1866 +typedef struct dm_namesp_event dm_namesp_event_t;
1867 +
1868 +
1869 +typedef enum {
1870 + DM_EXTENT_INVALID,
1871 + DM_EXTENT_RES,
1872 + DM_EXTENT_HOLE
1873 +} dm_extenttype_t;
1874 +
1875 +
1876 +struct dm_extent {
1877 + dm_extenttype_t ex_type;
1878 + unsigned int ex_pad1; /* reserved; do not reference */
1879 + dm_off_t ex_offset;
1880 + dm_size_t ex_length;
1881 +};
1882 +typedef struct dm_extent dm_extent_t;
1883 +
1884 +struct dm_fileattr {
1885 + dm_mode_t fa_mode;
1886 + uid_t fa_uid;
1887 + gid_t fa_gid;
1888 + time_t fa_atime;
1889 + time_t fa_mtime;
1890 + time_t fa_ctime;
1891 + time_t fa_dtime;
1892 + unsigned int fa_pad1; /* reserved; do not reference */
1893 + dm_off_t fa_size;
1894 +};
1895 +typedef struct dm_fileattr dm_fileattr_t;
1896 +
1897 +
1898 +struct dm_inherit { /* not supported */
1899 + dm_attrname_t ih_name;
1900 + dm_mode_t ih_filetype;
1901 +};
1902 +typedef struct dm_inherit dm_inherit_t;
1903 +
1904 +
1905 +typedef enum {
1906 + DM_MSGTYPE_INVALID,
1907 + DM_MSGTYPE_SYNC,
1908 + DM_MSGTYPE_ASYNC
1909 +} dm_msgtype_t;
1910 +
1911 +
1912 +struct dm_region {
1913 + dm_off_t rg_offset;
1914 + dm_size_t rg_size;
1915 + unsigned int rg_flags;
1916 + unsigned int rg_pad1; /* reserved; do not reference */
1917 +};
1918 +typedef struct dm_region dm_region_t;
1919 +
1920 +
1921 +typedef enum {
1922 + DM_RESP_INVALID,
1923 + DM_RESP_CONTINUE,
1924 + DM_RESP_ABORT,
1925 + DM_RESP_DONTCARE
1926 +} dm_response_t;
1927 +
1928 +
1929 +#ifndef HAVE_DM_RIGHT_T
1930 +#define HAVE_DM_RIGHT_T
1931 +typedef enum {
1932 + DM_RIGHT_NULL,
1933 + DM_RIGHT_SHARED,
1934 + DM_RIGHT_EXCL
1935 +} dm_right_t;
1936 +#endif
1937 +
1938 +
1939 +struct dm_stat {
1940 + int _link;
1941 + dm_vardata_t dt_handle;
1942 + dm_vardata_t dt_compname;
1943 + int dt_nevents;
1944 + dm_eventset_t dt_emask;
1945 + int dt_pers; /* field not supported */
1946 + int dt_pmanreg;
1947 + time_t dt_dtime;
1948 + unsigned int dt_change; /* field not supported */
1949 + unsigned int dt_pad1; /* reserved; do not reference */
1950 + dm_dev_t dt_dev;
1951 + dm_ino_t dt_ino;
1952 + dm_mode_t dt_mode;
1953 + dm_nlink_t dt_nlink;
1954 + uid_t dt_uid;
1955 + gid_t dt_gid;
1956 + dm_dev_t dt_rdev;
1957 + unsigned int dt_pad2; /* reserved; do not reference */
1958 + dm_off_t dt_size;
1959 + time_t dt_atime;
1960 + time_t dt_mtime;
1961 + time_t dt_ctime;
1962 + unsigned int dt_blksize;
1963 + dm_size_t dt_blocks;
1964 +
1965 + /* Non-standard filesystem-specific fields. Currently XFS is the only
1966 + supported filesystem type.
1967 + */
1968 +
1969 + __u64 dt_pad3; /* reserved; do not reference */
1970 + int dt_fstype; /* filesystem index; see sysfs(2) */
1971 + union {
1972 + struct {
1973 + dm_igen_t igen;
1974 + unsigned int xflags;
1975 + unsigned int extsize;
1976 + unsigned int extents;
1977 + unsigned short aextents;
1978 + unsigned short dmstate;
1979 + } sgi_xfs;
1980 + } fsys_dep;
1981 +};
1982 +typedef struct dm_stat dm_stat_t;
1983 +
1984 +#define dt_xfs_igen fsys_dep.sgi_xfs.igen
1985 +#define dt_xfs_xflags fsys_dep.sgi_xfs.xflags
1986 +#define dt_xfs_extsize fsys_dep.sgi_xfs.extsize
1987 +#define dt_xfs_extents fsys_dep.sgi_xfs.extents
1988 +#define dt_xfs_aextents fsys_dep.sgi_xfs.aextents
1989 +#define dt_xfs_dmstate fsys_dep.sgi_xfs.dmstate
1990 +
1991 +/* Flags for the non-standard dt_xfs_xflags field. */
1992 +
1993 +#define DM_XFLAG_REALTIME 0x00000001
1994 +#define DM_XFLAG_PREALLOC 0x00000002
1995 +#define DM_XFLAG_IMMUTABLE 0x00000008
1996 +#define DM_XFLAG_APPEND 0x00000010
1997 +#define DM_XFLAG_SYNC 0x00000020
1998 +#define DM_XFLAG_NOATIME 0x00000040
1999 +#define DM_XFLAG_NODUMP 0x00000080
2000 +#define DM_XFLAG_HASATTR 0x80000000
2001 +
2002 +
2003 +struct dm_timestruct {
2004 + time_t dm_tv_sec;
2005 + int dm_tv_nsec;
2006 +};
2007 +typedef struct dm_timestruct dm_timestruct_t;
2008 +
2009 +
2010 +struct dm_xstat { /* not supported */
2011 + dm_stat_t dx_statinfo;
2012 + dm_vardata_t dx_attrdata;
2013 +};
2014 +typedef struct dm_xstat dm_xstat_t;
2015 +
2016 +
2017 +#define MAXDMFSFIDSZ 46
2018 +
2019 +struct dm_fid {
2020 + __u16 dm_fid_len; /* length of remainder */
2021 + __u16 dm_fid_pad;
2022 + __u32 dm_fid_gen; /* generation number */
2023 + __u64 dm_fid_ino; /* 64 bits inode number */
2024 +};
2025 +typedef struct dm_fid dm_fid_t;
2026 +
2027 +
2028 +struct dm_handle {
2029 + union {
2030 + __s64 align; /* force alignment of ha_fid */
2031 + dm_fsid_t _ha_fsid; /* unique file system identifier */
2032 + } ha_u;
2033 + dm_fid_t ha_fid; /* file system specific file ID */
2034 +};
2035 +typedef struct dm_handle dm_handle_t;
2036 +#define ha_fsid ha_u._ha_fsid
2037 +
2038 +#define DM_HSIZE(handle) (((char *) &(handle).ha_fid.dm_fid_pad \
2039 + - (char *) &(handle)) \
2040 + + (handle).ha_fid.dm_fid_len)
2041 +
2042 +#define DM_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(dm_handle_t))
2043 +
2044 +#define DM_FSHSIZE sizeof(dm_fsid_t)
2045 +
2046 +
2047 +/* The following list provides the prototypes for all functions defined in
2048 + the DMAPI interface.
2049 +*/
2050 +
2051 +extern int
2052 +dm_clear_inherit( /* not supported */
2053 + dm_sessid_t sid,
2054 + void __user *hanp,
2055 + size_t hlen,
2056 + dm_token_t token,
2057 + dm_attrname_t __user *attrnamep);
2058 +
2059 +extern int
2060 +dm_create_by_handle( /* not supported */
2061 + dm_sessid_t sid,
2062 + void __user *dirhanp,
2063 + size_t dirhlen,
2064 + dm_token_t token,
2065 + void __user *hanp,
2066 + size_t hlen,
2067 + char __user *cname);
2068 +
2069 +extern int
2070 +dm_create_session(
2071 + dm_sessid_t oldsid,
2072 + char __user *sessinfop,
2073 + dm_sessid_t __user *newsidp);
2074 +
2075 +extern int
2076 +dm_create_userevent(
2077 + dm_sessid_t sid,
2078 + size_t msglen,
2079 + void __user *msgdatap,
2080 + dm_token_t __user *tokenp);
2081 +
2082 +extern int
2083 +dm_destroy_session(
2084 + dm_sessid_t sid);
2085 +
2086 +extern int
2087 +dm_downgrade_right( /* not completely supported; see caveat above */
2088 + dm_sessid_t sid,
2089 + void __user *hanp,
2090 + size_t hlen,
2091 + dm_token_t token);
2092 +
2093 +extern int
2094 +dm_fd_to_handle(
2095 + int fd,
2096 + void **hanpp,
2097 + size_t *hlenp);
2098 +
2099 +extern int
2100 +dm_find_eventmsg(
2101 + dm_sessid_t sid,
2102 + dm_token_t token,
2103 + size_t buflen,
2104 + void __user *bufp,
2105 + size_t __user *rlenp);
2106 +
2107 +extern int
2108 +dm_get_allocinfo(
2109 + dm_sessid_t sid,
2110 + void __user *hanp,
2111 + size_t hlen,
2112 + dm_token_t token,
2113 + dm_off_t *offp,
2114 + unsigned int nelem,
2115 + dm_extent_t *extentp,
2116 + unsigned int *nelemp);
2117 +
2118 +extern int
2119 +dm_get_bulkall( /* not supported */
2120 + dm_sessid_t sid,
2121 + void __user *hanp,
2122 + size_t hlen,
2123 + dm_token_t token,
2124 + unsigned int mask,
2125 + dm_attrname_t *attrnamep,
2126 + dm_attrloc_t *locp,
2127 + size_t buflen,
2128 + void *bufp,
2129 + size_t *rlenp);
2130 +
2131 +extern int
2132 +dm_get_bulkattr(
2133 + dm_sessid_t sid,
2134 + void __user *hanp,
2135 + size_t hlen,
2136 + dm_token_t token,
2137 + unsigned int mask,
2138 + dm_attrloc_t *locp,
2139 + size_t buflen,
2140 + void *bufp,
2141 + size_t *rlenp);
2142 +
2143 +extern int
2144 +dm_get_config(
2145 + void __user *hanp,
2146 + size_t hlen,
2147 + dm_config_t flagname,
2148 + dm_size_t __user *retvalp);
2149 +
2150 +extern int
2151 +dm_get_config_events(
2152 + void __user *hanp,
2153 + size_t hlen,
2154 + unsigned int nelem,
2155 + dm_eventset_t __user *eventsetp,
2156 + unsigned int __user *nelemp);
2157 +
2158 +extern int
2159 +dm_get_dirattrs(
2160 + dm_sessid_t sid,
2161 + void __user *hanp,
2162 + size_t hlen,
2163 + dm_token_t token,
2164 + unsigned int mask,
2165 + dm_attrloc_t *locp,
2166 + size_t buflen,
2167 + void *bufp,
2168 + size_t *rlenp);
2169 +
2170 +extern int
2171 +dm_get_dmattr(
2172 + dm_sessid_t sid,
2173 + void __user *hanp,
2174 + size_t hlen,
2175 + dm_token_t token,
2176 + dm_attrname_t __user *attrnamep,
2177 + size_t buflen,
2178 + void __user *bufp,
2179 + size_t __user *rlenp);
2180 +
2181 +extern int
2182 +dm_get_eventlist(
2183 + dm_sessid_t sid,
2184 + void __user *hanp,
2185 + size_t hlen,
2186 + dm_token_t token,
2187 + unsigned int nelem,
2188 + dm_eventset_t __user *eventsetp,
2189 + unsigned int __user *nelemp);
2190 +
2191 +extern int
2192 +dm_get_events(
2193 + dm_sessid_t sid,
2194 + unsigned int maxmsgs,
2195 + unsigned int flags,
2196 + size_t buflen,
2197 + void __user *bufp,
2198 + size_t __user *rlenp);
2199 +
2200 +extern int
2201 +dm_get_fileattr(
2202 + dm_sessid_t sid,
2203 + void __user *hanp,
2204 + size_t hlen,
2205 + dm_token_t token,
2206 + unsigned int mask,
2207 + dm_stat_t __user *statp);
2208 +
2209 +extern int
2210 +dm_get_mountinfo(
2211 + dm_sessid_t sid,
2212 + void __user *hanp,
2213 + size_t hlen,
2214 + dm_token_t token,
2215 + size_t buflen,
2216 + void __user *bufp,
2217 + size_t __user *rlenp);
2218 +
2219 +extern int
2220 +dm_get_region(
2221 + dm_sessid_t sid,
2222 + void __user *hanp,
2223 + size_t hlen,
2224 + dm_token_t token,
2225 + unsigned int nelem,
2226 + dm_region_t __user *regbufp,
2227 + unsigned int __user *nelemp);
2228 +
2229 +extern int
2230 +dm_getall_disp(
2231 + dm_sessid_t sid,
2232 + size_t buflen,
2233 + void __user *bufp,
2234 + size_t __user *rlenp);
2235 +
2236 +extern int
2237 +dm_getall_dmattr(
2238 + dm_sessid_t sid,
2239 + void __user *hanp,
2240 + size_t hlen,
2241 + dm_token_t token,
2242 + size_t buflen,
2243 + void __user *bufp,
2244 + size_t __user *rlenp);
2245 +
2246 +extern int
2247 +dm_getall_inherit( /* not supported */
2248 + dm_sessid_t sid,
2249 + void __user *hanp,
2250 + size_t hlen,
2251 + dm_token_t token,
2252 + unsigned int nelem,
2253 + dm_inherit_t __user *inheritbufp,
2254 + unsigned int __user *nelemp);
2255 +
2256 +extern int
2257 +dm_getall_sessions(
2258 + unsigned int nelem,
2259 + dm_sessid_t __user *sidbufp,
2260 + unsigned int __user *nelemp);
2261 +
2262 +extern int
2263 +dm_getall_tokens(
2264 + dm_sessid_t sid,
2265 + unsigned int nelem,
2266 + dm_token_t __user *tokenbufp,
2267 + unsigned int __user *nelemp);
2268 +
2269 +extern int
2270 +dm_handle_cmp(
2271 + void *hanp1,
2272 + size_t hlen1,
2273 + void *hanp2,
2274 + size_t hlen2);
2275 +
2276 +extern void
2277 +dm_handle_free(
2278 + void *hanp,
2279 + size_t hlen);
2280 +
2281 +extern u_int
2282 +dm_handle_hash(
2283 + void *hanp,
2284 + size_t hlen);
2285 +
2286 +extern dm_boolean_t
2287 +dm_handle_is_valid(
2288 + void *hanp,
2289 + size_t hlen);
2290 +
2291 +extern int
2292 +dm_handle_to_fshandle(
2293 + void *hanp,
2294 + size_t hlen,
2295 + void **fshanpp,
2296 + size_t *fshlenp);
2297 +
2298 +extern int
2299 +dm_handle_to_fsid(
2300 + void *hanp,
2301 + size_t hlen,
2302 + dm_fsid_t *fsidp);
2303 +
2304 +extern int
2305 +dm_handle_to_igen(
2306 + void *hanp,
2307 + size_t hlen,
2308 + dm_igen_t *igenp);
2309 +
2310 +extern int
2311 +dm_handle_to_ino(
2312 + void *hanp,
2313 + size_t hlen,
2314 + dm_ino_t *inop);
2315 +
2316 +extern int
2317 +dm_handle_to_path(
2318 + void *dirhanp,
2319 + size_t dirhlen,
2320 + void *targhanp,
2321 + size_t targhlen,
2322 + size_t buflen,
2323 + char *pathbufp,
2324 + size_t *rlenp);
2325 +
2326 +extern int
2327 +dm_init_attrloc(
2328 + dm_sessid_t sid,
2329 + void __user *hanp,
2330 + size_t hlen,
2331 + dm_token_t token,
2332 + dm_attrloc_t __user *locp);
2333 +
2334 +extern int
2335 +dm_init_service(
2336 + char **versionstrpp);
2337 +
2338 +extern int
2339 +dm_make_handle(
2340 + dm_fsid_t *fsidp,
2341 + dm_ino_t *inop,
2342 + dm_igen_t *igenp,
2343 + void **hanpp,
2344 + size_t *hlenp);
2345 +
2346 +extern int
2347 +dm_make_fshandle(
2348 + dm_fsid_t *fsidp,
2349 + void **hanpp,
2350 + size_t *hlenp);
2351 +
2352 +extern int
2353 +dm_mkdir_by_handle( /* not supported */
2354 + dm_sessid_t sid,
2355 + void __user *dirhanp,
2356 + size_t dirhlen,
2357 + dm_token_t token,
2358 + void __user *hanp,
2359 + size_t hlen,
2360 + char __user *cname);
2361 +
2362 +extern int
2363 +dm_move_event(
2364 + dm_sessid_t srcsid,
2365 + dm_token_t token,
2366 + dm_sessid_t targetsid,
2367 + dm_token_t __user *rtokenp);
2368 +
2369 +extern int
2370 +dm_obj_ref_hold(
2371 + dm_sessid_t sid,
2372 + dm_token_t token,
2373 + void __user *hanp,
2374 + size_t hlen);
2375 +
2376 +extern int
2377 +dm_obj_ref_query(
2378 + dm_sessid_t sid,
2379 + dm_token_t token,
2380 + void *hanp,
2381 + size_t hlen);
2382 +
2383 +extern int
2384 +dm_obj_ref_rele(
2385 + dm_sessid_t sid,
2386 + dm_token_t token,
2387 + void __user *hanp,
2388 + size_t hlen);
2389 +
2390 +extern int
2391 +dm_path_to_fshandle(
2392 + char *path,
2393 + void **hanpp,
2394 + size_t *hlenp);
2395 +
2396 +extern int
2397 +dm_path_to_handle(
2398 + char *path,
2399 + void **hanpp,
2400 + size_t *hlenp);
2401 +
2402 +extern int
2403 +dm_pending(
2404 + dm_sessid_t sid,
2405 + dm_token_t token,
2406 + dm_timestruct_t __user *delay);
2407 +
2408 +extern int
2409 +dm_probe_hole(
2410 + dm_sessid_t sid,
2411 + void __user *hanp,
2412 + size_t hlen,
2413 + dm_token_t token,
2414 + dm_off_t off,
2415 + dm_size_t len,
2416 + dm_off_t __user *roffp,
2417 + dm_size_t __user *rlenp);
2418 +
2419 +extern int
2420 +dm_punch_hole(
2421 + dm_sessid_t sid,
2422 + void __user *hanp,
2423 + size_t hlen,
2424 + dm_token_t token,
2425 + dm_off_t off,
2426 + dm_size_t len);
2427 +
2428 +extern int
2429 +dm_query_right( /* not completely supported; see caveat above */
2430 + dm_sessid_t sid,
2431 + void __user *hanp,
2432 + size_t hlen,
2433 + dm_token_t token,
2434 + dm_right_t __user *rightp);
2435 +
2436 +extern int
2437 +dm_query_session(
2438 + dm_sessid_t sid,
2439 + size_t buflen,
2440 + void __user *bufp,
2441 + size_t __user *rlenp);
2442 +
2443 +extern dm_ssize_t
2444 +dm_read_invis(
2445 + dm_sessid_t sid,
2446 + void *hanp,
2447 + size_t hlen,
2448 + dm_token_t token,
2449 + dm_off_t off,
2450 + dm_size_t len,
2451 + void *bufp);
2452 +
2453 +extern int
2454 +dm_release_right( /* not completely supported; see caveat above */
2455 + dm_sessid_t sid,
2456 + void __user *hanp,
2457 + size_t hlen,
2458 + dm_token_t token);
2459 +
2460 +extern int
2461 +dm_remove_dmattr(
2462 + dm_sessid_t sid,
2463 + void __user *hanp,
2464 + size_t hlen,
2465 + dm_token_t token,
2466 + int setdtime,
2467 + dm_attrname_t __user *attrnamep);
2468 +
2469 +extern int
2470 +dm_request_right( /* not completely supported; see caveat above */
2471 + dm_sessid_t sid,
2472 + void __user *hanp,
2473 + size_t hlen,
2474 + dm_token_t token,
2475 + unsigned int flags,
2476 + dm_right_t right);
2477 +
2478 +extern int
2479 +dm_respond_event(
2480 + dm_sessid_t sid,
2481 + dm_token_t token,
2482 + dm_response_t response,
2483 + int reterror,
2484 + size_t buflen,
2485 + void __user *respbufp);
2486 +
2487 +extern int
2488 +dm_send_msg(
2489 + dm_sessid_t targetsid,
2490 + dm_msgtype_t msgtype,
2491 + size_t buflen,
2492 + void __user *bufp);
2493 +
2494 +extern int
2495 +dm_set_disp(
2496 + dm_sessid_t sid,
2497 + void __user *hanp,
2498 + size_t hlen,
2499 + dm_token_t token,
2500 + dm_eventset_t __user *eventsetp,
2501 + unsigned int maxevent);
2502 +
2503 +extern int
2504 +dm_set_dmattr(
2505 + dm_sessid_t sid,
2506 + void __user *hanp,
2507 + size_t hlen,
2508 + dm_token_t token,
2509 + dm_attrname_t __user *attrnamep,
2510 + int setdtime,
2511 + size_t buflen,
2512 + void __user *bufp);
2513 +
2514 +extern int
2515 +dm_set_eventlist(
2516 + dm_sessid_t sid,
2517 + void __user *hanp,
2518 + size_t hlen,
2519 + dm_token_t token,
2520 + dm_eventset_t __user *eventsetp,
2521 + unsigned int maxevent);
2522 +
2523 +extern int
2524 +dm_set_fileattr(
2525 + dm_sessid_t sid,
2526 + void __user *hanp,
2527 + size_t hlen,
2528 + dm_token_t token,
2529 + unsigned int mask,
2530 + dm_fileattr_t __user *attrp);
2531 +
2532 +extern int
2533 +dm_set_inherit( /* not supported */
2534 + dm_sessid_t sid,
2535 + void __user *hanp,
2536 + size_t hlen,
2537 + dm_token_t token,
2538 + dm_attrname_t __user *attrnamep,
2539 + mode_t mode);
2540 +
2541 +extern int
2542 +dm_set_region(
2543 + dm_sessid_t sid,
2544 + void __user *hanp,
2545 + size_t hlen,
2546 + dm_token_t token,
2547 + unsigned int nelem,
2548 + dm_region_t __user *regbufp,
2549 + dm_boolean_t __user *exactflagp);
2550 +
2551 +extern int
2552 +dm_set_return_on_destroy(
2553 + dm_sessid_t sid,
2554 + void __user *hanp,
2555 + size_t hlen,
2556 + dm_token_t token,
2557 + dm_attrname_t __user *attrnamep,
2558 + dm_boolean_t enable);
2559 +
2560 +extern int
2561 +dm_symlink_by_handle( /* not supported */
2562 + dm_sessid_t sid,
2563 + void __user *dirhanp,
2564 + size_t dirhlen,
2565 + dm_token_t token,
2566 + void __user *hanp,
2567 + size_t hlen,
2568 + char __user *cname,
2569 + char __user *path);
2570 +
2571 +extern int
2572 +dm_sync_by_handle(
2573 + dm_sessid_t sid,
2574 + void __user *hanp,
2575 + size_t hlen,
2576 + dm_token_t token);
2577 +
2578 +extern int
2579 +dm_upgrade_right( /* not completely supported; see caveat above */
2580 + dm_sessid_t sid,
2581 + void __user *hanp,
2582 + size_t hlen,
2583 + dm_token_t token);
2584 +
2585 +extern dm_ssize_t
2586 +dm_write_invis(
2587 + dm_sessid_t sid,
2588 + void *hanp,
2589 + size_t hlen,
2590 + dm_token_t token,
2591 + int flags,
2592 + dm_off_t off,
2593 + dm_size_t len,
2594 + void *bufp);
2595 +
2596 +/* Non-standard SGI additions to the DMAPI interface. */
2597 +
2598 +int
2599 +dm_open_by_handle(
2600 + void __user *hanp,
2601 + size_t hlen,
2602 + int mode);
2603 +
2604 +extern int
2605 +dm_get_dioinfo(
2606 + dm_sessid_t sid,
2607 + void __user *hanp,
2608 + size_t hlen,
2609 + dm_token_t token,
2610 + dm_dioinfo_t __user *diop);
2611 +
2612 +#ifdef __cplusplus
2613 +}
2614 +#endif
2615 +
2616 +#endif /* __DMAPI_H__ */
2617 Index: linux-2.6.26/fs/dmapi/dmapi_handle.c
2618 ===================================================================
2619 --- /dev/null
2620 +++ linux-2.6.26/fs/dmapi/dmapi_handle.c
2621 @@ -0,0 +1,119 @@
2622 +/*
2623 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
2624 + *
2625 + * This program is free software; you can redistribute it and/or modify it
2626 + * under the terms of version 2 of the GNU General Public License as
2627 + * published by the Free Software Foundation.
2628 + *
2629 + * This program is distributed in the hope that it would be useful, but
2630 + * WITHOUT ANY WARRANTY; without even the implied warranty of
2631 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
2632 + *
2633 + * Further, this software is distributed without any warranty that it is
2634 + * free of the rightful claim of any third person regarding infringement
2635 + * or the like. Any license provided herein, whether implied or
2636 + * otherwise, applies only to this software file. Patent licenses, if
2637 + * any, provided herein do not apply to combinations of this program with
2638 + * other software, or any other product whatsoever.
2639 + *
2640 + * You should have received a copy of the GNU General Public License along
2641 + * with this program; if not, write the Free Software Foundation, Inc., 59
2642 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
2643 + *
2644 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
2645 + * Mountain View, CA 94043, or:
2646 + *
2647 + * http://www.sgi.com
2648 + *
2649 + * For further information regarding this notice, see:
2650 + *
2651 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
2652 + */
2653 +#include "dmapi.h"
2654 +#include "dmapi_kern.h"
2655 +#include "dmapi_private.h"
2656 +
2657 +
2658 +int
2659 +dm_create_by_handle(
2660 + dm_sessid_t sid,
2661 + void __user *dirhanp,
2662 + size_t dirhlen,
2663 + dm_token_t token,
2664 + void __user *hanp,
2665 + size_t hlen,
2666 + char __user *cname)
2667 +{
2668 + dm_fsys_vector_t *fsys_vector;
2669 + dm_tokdata_t *tdp;
2670 + int error;
2671 +
2672 + error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
2673 + DM_RIGHT_EXCL, &tdp);
2674 + if (error != 0)
2675 + return(error);
2676 +
2677 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2678 + error = fsys_vector->create_by_handle(tdp->td_ip, tdp->td_right,
2679 + hanp, hlen, cname);
2680 +
2681 + dm_app_put_tdp(tdp);
2682 + return(error);
2683 +}
2684 +
2685 +
2686 +int
2687 +dm_mkdir_by_handle(
2688 + dm_sessid_t sid,
2689 + void __user *dirhanp,
2690 + size_t dirhlen,
2691 + dm_token_t token,
2692 + void __user *hanp,
2693 + size_t hlen,
2694 + char __user *cname)
2695 +{
2696 + dm_fsys_vector_t *fsys_vector;
2697 + dm_tokdata_t *tdp;
2698 + int error;
2699 +
2700 + error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
2701 + DM_RIGHT_EXCL, &tdp);
2702 + if (error != 0)
2703 + return(error);
2704 +
2705 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2706 + error = fsys_vector->mkdir_by_handle(tdp->td_ip, tdp->td_right,
2707 + hanp, hlen, cname);
2708 +
2709 + dm_app_put_tdp(tdp);
2710 + return(error);
2711 +}
2712 +
2713 +
2714 +int
2715 +dm_symlink_by_handle(
2716 + dm_sessid_t sid,
2717 + void __user *dirhanp,
2718 + size_t dirhlen,
2719 + dm_token_t token,
2720 + void __user *hanp,
2721 + size_t hlen,
2722 + char __user *cname,
2723 + char __user *path)
2724 +{
2725 + dm_fsys_vector_t *fsys_vector;
2726 + dm_tokdata_t *tdp;
2727 + int error;
2728 +
2729 + error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
2730 + DM_RIGHT_EXCL, &tdp);
2731 + if (error != 0)
2732 + return(error);
2733 +
2734 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2735 + error = fsys_vector->symlink_by_handle(tdp->td_ip, tdp->td_right,
2736 + hanp, hlen, cname, path);
2737 +
2738 + dm_app_put_tdp(tdp);
2739 + return(error);
2740 +}
2741 Index: linux-2.6.26/fs/dmapi/dmapi_hole.c
2742 ===================================================================
2743 --- /dev/null
2744 +++ linux-2.6.26/fs/dmapi/dmapi_hole.c
2745 @@ -0,0 +1,119 @@
2746 +/*
2747 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
2748 + *
2749 + * This program is free software; you can redistribute it and/or modify it
2750 + * under the terms of version 2 of the GNU General Public License as
2751 + * published by the Free Software Foundation.
2752 + *
2753 + * This program is distributed in the hope that it would be useful, but
2754 + * WITHOUT ANY WARRANTY; without even the implied warranty of
2755 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
2756 + *
2757 + * Further, this software is distributed without any warranty that it is
2758 + * free of the rightful claim of any third person regarding infringement
2759 + * or the like. Any license provided herein, whether implied or
2760 + * otherwise, applies only to this software file. Patent licenses, if
2761 + * any, provided herein do not apply to combinations of this program with
2762 + * other software, or any other product whatsoever.
2763 + *
2764 + * You should have received a copy of the GNU General Public License along
2765 + * with this program; if not, write the Free Software Foundation, Inc., 59
2766 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
2767 + *
2768 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
2769 + * Mountain View, CA 94043, or:
2770 + *
2771 + * http://www.sgi.com
2772 + *
2773 + * For further information regarding this notice, see:
2774 + *
2775 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
2776 + */
2777 +#include "dmapi.h"
2778 +#include "dmapi_kern.h"
2779 +#include "dmapi_private.h"
2780 +
2781 +
2782 +int
2783 +dm_get_allocinfo_rvp(
2784 + dm_sessid_t sid,
2785 + void __user *hanp,
2786 + size_t hlen,
2787 + dm_token_t token,
2788 + dm_off_t __user *offp,
2789 + u_int nelem,
2790 + dm_extent_t __user *extentp,
2791 + u_int __user *nelemp,
2792 + int *rvp)
2793 +{
2794 + dm_fsys_vector_t *fsys_vector;
2795 + dm_tokdata_t *tdp;
2796 + int error;
2797 +
2798 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2799 + DM_RIGHT_SHARED, &tdp);
2800 + if (error != 0)
2801 + return(error);
2802 +
2803 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2804 + error = fsys_vector->get_allocinfo_rvp(tdp->td_ip, tdp->td_right,
2805 + offp, nelem, extentp, nelemp, rvp);
2806 +
2807 + dm_app_put_tdp(tdp);
2808 + return(error);
2809 +}
2810 +
2811 +
2812 +int
2813 +dm_probe_hole(
2814 + dm_sessid_t sid,
2815 + void __user *hanp,
2816 + size_t hlen,
2817 + dm_token_t token,
2818 + dm_off_t off,
2819 + dm_size_t len,
2820 + dm_off_t __user *roffp,
2821 + dm_size_t __user *rlenp)
2822 +{
2823 + dm_fsys_vector_t *fsys_vector;
2824 + dm_tokdata_t *tdp;
2825 + int error;
2826 +
2827 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2828 + DM_RIGHT_SHARED, &tdp);
2829 + if (error != 0)
2830 + return(error);
2831 +
2832 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2833 + error = fsys_vector->probe_hole(tdp->td_ip, tdp->td_right,
2834 + off, len, roffp, rlenp);
2835 +
2836 + dm_app_put_tdp(tdp);
2837 + return(error);
2838 +}
2839 +
2840 +
2841 +int
2842 +dm_punch_hole(
2843 + dm_sessid_t sid,
2844 + void __user *hanp,
2845 + size_t hlen,
2846 + dm_token_t token,
2847 + dm_off_t off,
2848 + dm_size_t len)
2849 +{
2850 + dm_fsys_vector_t *fsys_vector;
2851 + dm_tokdata_t *tdp;
2852 + int error;
2853 +
2854 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2855 + DM_RIGHT_EXCL, &tdp);
2856 + if (error != 0)
2857 + return(error);
2858 +
2859 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2860 + error = fsys_vector->punch_hole(tdp->td_ip, tdp->td_right, off, len);
2861 +
2862 + dm_app_put_tdp(tdp);
2863 + return(error);
2864 +}
2865 Index: linux-2.6.26/fs/dmapi/dmapi_io.c
2866 ===================================================================
2867 --- /dev/null
2868 +++ linux-2.6.26/fs/dmapi/dmapi_io.c
2869 @@ -0,0 +1,142 @@
2870 +/*
2871 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
2872 + *
2873 + * This program is free software; you can redistribute it and/or modify it
2874 + * under the terms of version 2 of the GNU General Public License as
2875 + * published by the Free Software Foundation.
2876 + *
2877 + * This program is distributed in the hope that it would be useful, but
2878 + * WITHOUT ANY WARRANTY; without even the implied warranty of
2879 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
2880 + *
2881 + * Further, this software is distributed without any warranty that it is
2882 + * free of the rightful claim of any third person regarding infringement
2883 + * or the like. Any license provided herein, whether implied or
2884 + * otherwise, applies only to this software file. Patent licenses, if
2885 + * any, provided herein do not apply to combinations of this program with
2886 + * other software, or any other product whatsoever.
2887 + *
2888 + * You should have received a copy of the GNU General Public License along
2889 + * with this program; if not, write the Free Software Foundation, Inc., 59
2890 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
2891 + *
2892 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
2893 + * Mountain View, CA 94043, or:
2894 + *
2895 + * http://www.sgi.com
2896 + *
2897 + * For further information regarding this notice, see:
2898 + *
2899 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
2900 + */
2901 +#include "dmapi.h"
2902 +#include "dmapi_kern.h"
2903 +#include "dmapi_private.h"
2904 +
2905 +
2906 +int
2907 +dm_read_invis_rvp(
2908 + dm_sessid_t sid,
2909 + void __user *hanp,
2910 + size_t hlen,
2911 + dm_token_t token,
2912 + dm_off_t off,
2913 + dm_size_t len,
2914 + void __user *bufp,
2915 + int *rvp)
2916 +{
2917 + dm_fsys_vector_t *fsys_vector;
2918 + dm_tokdata_t *tdp;
2919 + int error;
2920 +
2921 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2922 + DM_RIGHT_SHARED, &tdp);
2923 + if (error != 0)
2924 + return(error);
2925 +
2926 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2927 + error = fsys_vector->read_invis_rvp(tdp->td_ip, tdp->td_right,
2928 + off, len, bufp, rvp);
2929 +
2930 + dm_app_put_tdp(tdp);
2931 + return(error);
2932 +}
2933 +
2934 +
2935 +int
2936 +dm_write_invis_rvp(
2937 + dm_sessid_t sid,
2938 + void __user *hanp,
2939 + size_t hlen,
2940 + dm_token_t token,
2941 + int flags,
2942 + dm_off_t off,
2943 + dm_size_t len,
2944 + void __user *bufp,
2945 + int *rvp)
2946 +{
2947 + dm_fsys_vector_t *fsys_vector;
2948 + dm_tokdata_t *tdp;
2949 + int error;
2950 +
2951 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2952 + DM_RIGHT_EXCL, &tdp);
2953 + if (error != 0)
2954 + return(error);
2955 +
2956 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2957 + error = fsys_vector->write_invis_rvp(tdp->td_ip, tdp->td_right,
2958 + flags, off, len, bufp, rvp);
2959 +
2960 + dm_app_put_tdp(tdp);
2961 + return(error);
2962 +}
2963 +
2964 +
2965 +int
2966 +dm_sync_by_handle (
2967 + dm_sessid_t sid,
2968 + void __user *hanp,
2969 + size_t hlen,
2970 + dm_token_t token)
2971 +{
2972 + dm_fsys_vector_t *fsys_vector;
2973 + dm_tokdata_t *tdp;
2974 + int error;
2975 +
2976 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
2977 + DM_RIGHT_EXCL, &tdp);
2978 + if (error != 0)
2979 + return(error);
2980 +
2981 + fsys_vector = dm_fsys_vector(tdp->td_ip);
2982 + error = fsys_vector->sync_by_handle(tdp->td_ip, tdp->td_right);
2983 +
2984 + dm_app_put_tdp(tdp);
2985 + return(error);
2986 +}
2987 +
2988 +
2989 +int
2990 +dm_get_dioinfo (
2991 + dm_sessid_t sid,
2992 + void __user *hanp,
2993 + size_t hlen,
2994 + dm_token_t token,
2995 + dm_dioinfo_t __user *diop)
2996 +{
2997 + dm_fsys_vector_t *fsys_vector;
2998 + dm_tokdata_t *tdp;
2999 + int error;
3000 +
3001 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
3002 + DM_RIGHT_SHARED, &tdp);
3003 + if (error != 0)
3004 + return(error);
3005 +
3006 + fsys_vector = dm_fsys_vector(tdp->td_ip);
3007 + error = fsys_vector->get_dioinfo(tdp->td_ip, tdp->td_right, diop);
3008 +
3009 + dm_app_put_tdp(tdp);
3010 + return(error);
3011 +}
3012 Index: linux-2.6.26/fs/dmapi/dmapi_kern.h
3013 ===================================================================
3014 --- /dev/null
3015 +++ linux-2.6.26/fs/dmapi/dmapi_kern.h
3016 @@ -0,0 +1,599 @@
3017 +/*
3018 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3019 + *
3020 + * This program is free software; you can redistribute it and/or modify it
3021 + * under the terms of version 2 of the GNU General Public License as
3022 + * published by the Free Software Foundation.
3023 + *
3024 + * This program is distributed in the hope that it would be useful, but
3025 + * WITHOUT ANY WARRANTY; without even the implied warranty of
3026 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
3027 + *
3028 + * Further, this software is distributed without any warranty that it is
3029 + * free of the rightful claim of any third person regarding infringement
3030 + * or the like. Any license provided herein, whether implied or
3031 + * otherwise, applies only to this software file. Patent licenses, if
3032 + * any, provided herein do not apply to combinations of this program with
3033 + * other software, or any other product whatsoever.
3034 + *
3035 + * You should have received a copy of the GNU General Public License along
3036 + * with this program; if not, write the Free Software Foundation, Inc., 59
3037 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
3038 + *
3039 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
3040 + * Mountain View, CA 94043, or:
3041 + *
3042 + * http://www.sgi.com
3043 + *
3044 + * For further information regarding this notice, see:
3045 + *
3046 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
3047 + */
3048 +
3049 +#ifndef __DMAPI_KERN_H__
3050 +#define __DMAPI_KERN_H__
3051 +
3052 +#include <linux/fs.h>
3053 +
3054 +union sys_dmapi_uarg {
3055 + void *p;
3056 + __u64 u;
3057 +};
3058 +typedef union sys_dmapi_uarg sys_dmapi_u;
3059 +
3060 +struct sys_dmapi_args {
3061 + sys_dmapi_u uarg1, uarg2, uarg3, uarg4, uarg5, uarg6, uarg7, uarg8,
3062 + uarg9, uarg10, uarg11;
3063 +};
3064 +typedef struct sys_dmapi_args sys_dmapi_args_t;
3065 +
3066 +#define DM_Uarg(uap,i) uap->uarg##i.u
3067 +#define DM_Parg(uap,i) uap->uarg##i.p
3068 +
3069 +#ifdef __KERNEL__
3070 +
3071 +struct dm_handle_t;
3072 +
3073 +/* The first group of definitions and prototypes define the filesystem's
3074 + interface into the DMAPI code.
3075 +*/
3076 +
3077 +
3078 +/* Definitions used for the flags field on dm_send_data_event(),
3079 + dm_send_unmount_event(), and dm_send_namesp_event() calls.
3080 +*/
3081 +
3082 +#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
3083 +#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
3084 +
3085 +/* Possible code levels reported by dm_code_level(). */
3086 +
3087 +#define DM_CLVL_INIT 0 /* DMAPI prior to X/Open compliance */
3088 +#define DM_CLVL_XOPEN 1 /* X/Open compliant DMAPI */
3089 +
3090 +
3091 +/*
3092 + * Filesystem operations accessed by the DMAPI core.
3093 + */
3094 +struct filesystem_dmapi_operations {
3095 + int (*get_fsys_vector)(struct super_block *sb, void *addr);
3096 + int (*fh_to_inode)(struct super_block *sb, struct inode **ip,
3097 + dm_fid_t *fid);
3098 + const struct file_operations * (*get_invis_ops)(struct inode *ip);
3099 + int (*inode_to_fh)(struct inode *ip, dm_fid_t *fid,
3100 + dm_fsid_t *fsid );
3101 + void (*get_fsid)(struct super_block *sb, dm_fsid_t *fsid);
3102 +#define HAVE_DM_QUEUE_FLUSH
3103 + int (*flushing)(struct inode *ip);
3104 +};
3105 +
3106 +
3107 +/* Prototypes used outside of the DMI module/directory. */
3108 +
3109 +int dm_send_data_event(
3110 + dm_eventtype_t event,
3111 + struct inode *ip,
3112 + dm_right_t vp_right,
3113 + dm_off_t off,
3114 + size_t len,
3115 + int flags);
3116 +
3117 +int dm_send_destroy_event(
3118 + struct inode *ip,
3119 + dm_right_t vp_right);
3120 +
3121 +int dm_send_mount_event(
3122 + struct super_block *sb,
3123 + dm_right_t vfsp_right,
3124 + struct inode *ip,
3125 + dm_right_t vp_right,
3126 + struct inode *rootip,
3127 + dm_right_t rootvp_right,
3128 + char *name1,
3129 + char *name2);
3130 +
3131 +int dm_send_namesp_event(
3132 + dm_eventtype_t event,
3133 + struct super_block *sb,
3134 + struct inode *ip1,
3135 + dm_right_t vp1_right,
3136 + struct inode *ip2,
3137 + dm_right_t vp2_right,
3138 + const char *name1,
3139 + const char *name2,
3140 + mode_t mode,
3141 + int retcode,
3142 + int flags);
3143 +
3144 +void dm_send_unmount_event(
3145 + struct super_block *sbp,
3146 + struct inode *ip,
3147 + dm_right_t sbp_right,
3148 + mode_t mode,
3149 + int retcode,
3150 + int flags);
3151 +
3152 +int dm_code_level(void);
3153 +
3154 +int dm_ip_to_handle (
3155 + struct inode *ip,
3156 + dm_handle_t *handlep);
3157 +
3158 +#define HAVE_DM_RELEASE_THREADS_ERRNO
3159 +int dm_release_threads(
3160 + struct super_block *sb,
3161 + struct inode *inode,
3162 + int errno);
3163 +
3164 +void dmapi_register(
3165 + struct file_system_type *fstype,
3166 + struct filesystem_dmapi_operations *dmapiops);
3167 +
3168 +void dmapi_unregister(
3169 + struct file_system_type *fstype);
3170 +
3171 +int dmapi_registered(
3172 + struct file_system_type *fstype,
3173 + struct filesystem_dmapi_operations **dmapiops);
3174 +
3175 +
3176 +/* The following prototypes and definitions are used by DMAPI as its
3177 + interface into the filesystem code. Communication between DMAPI and the
3178 + filesystem are established as follows:
3179 + 1. DMAPI uses the VFS_DMAPI_FSYS_VECTOR to ask for the addresses
3180 + of all the functions within the filesystem that it may need to call.
3181 + 2. The filesystem returns an array of function name/address pairs which
3182 + DMAPI builds into a function vector.
3183 + The VFS_DMAPI_FSYS_VECTOR call is only made one time for a particular
3184 + filesystem type. From then on, DMAPI uses its function vector to call the
3185 + filesystem functions directly. Functions in the array which DMAPI doesn't
3186 + recognize are ignored. A dummy function which returns ENOSYS is used for
3187 + any function that DMAPI needs but which was not provided by the filesystem.
3188 + If XFS doesn't recognize the VFS_DMAPI_FSYS_VECTOR, DMAPI assumes that it
3189 + doesn't have the X/Open support code; in this case DMAPI uses the XFS-code
3190 + originally bundled within DMAPI.
3191 +
3192 + The goal of this interface is allow incremental changes to be made to
3193 + both the filesystem and to DMAPI while minimizing inter-patch dependencies,
3194 + and to eventually allow DMAPI to support multiple filesystem types at the
3195 + same time should that become necessary.
3196 +*/
3197 +
3198 +typedef enum {
3199 + DM_FSYS_CLEAR_INHERIT = 0,
3200 + DM_FSYS_CREATE_BY_HANDLE = 1,
3201 + DM_FSYS_DOWNGRADE_RIGHT = 2,
3202 + DM_FSYS_GET_ALLOCINFO_RVP = 3,
3203 + DM_FSYS_GET_BULKALL_RVP = 4,
3204 + DM_FSYS_GET_BULKATTR_RVP = 5,
3205 + DM_FSYS_GET_CONFIG = 6,
3206 + DM_FSYS_GET_CONFIG_EVENTS = 7,
3207 + DM_FSYS_GET_DESTROY_DMATTR = 8,
3208 + DM_FSYS_GET_DIOINFO = 9,
3209 + DM_FSYS_GET_DIRATTRS_RVP = 10,
3210 + DM_FSYS_GET_DMATTR = 11,
3211 + DM_FSYS_GET_EVENTLIST = 12,
3212 + DM_FSYS_GET_FILEATTR = 13,
3213 + DM_FSYS_GET_REGION = 14,
3214 + DM_FSYS_GETALL_DMATTR = 15,
3215 + DM_FSYS_GETALL_INHERIT = 16,
3216 + DM_FSYS_INIT_ATTRLOC = 17,
3217 + DM_FSYS_MKDIR_BY_HANDLE = 18,
3218 + DM_FSYS_PROBE_HOLE = 19,
3219 + DM_FSYS_PUNCH_HOLE = 20,
3220 + DM_FSYS_READ_INVIS_RVP = 21,
3221 + DM_FSYS_RELEASE_RIGHT = 22,
3222 + DM_FSYS_REMOVE_DMATTR = 23,
3223 + DM_FSYS_REQUEST_RIGHT = 24,
3224 + DM_FSYS_SET_DMATTR = 25,
3225 + DM_FSYS_SET_EVENTLIST = 26,
3226 + DM_FSYS_SET_FILEATTR = 27,
3227 + DM_FSYS_SET_INHERIT = 28,
3228 + DM_FSYS_SET_REGION = 29,
3229 + DM_FSYS_SYMLINK_BY_HANDLE = 30,
3230 + DM_FSYS_SYNC_BY_HANDLE = 31,
3231 + DM_FSYS_UPGRADE_RIGHT = 32,
3232 + DM_FSYS_WRITE_INVIS_RVP = 33,
3233 + DM_FSYS_OBJ_REF_HOLD = 34,
3234 + DM_FSYS_MAX = 35
3235 +} dm_fsys_switch_t;
3236 +
3237 +
3238 +#define DM_FSYS_OBJ 0x1 /* object refers to a fsys handle */
3239 +
3240 +
3241 +/*
3242 + * Prototypes for filesystem-specific functions.
3243 + */
3244 +
3245 +typedef int (*dm_fsys_clear_inherit_t)(
3246 + struct inode *ip,
3247 + dm_right_t right,
3248 + dm_attrname_t __user *attrnamep);
3249 +
3250 +typedef int (*dm_fsys_create_by_handle_t)(
3251 + struct inode *ip,
3252 + dm_right_t right,
3253 + void __user *hanp,
3254 + size_t hlen,
3255 + char __user *cname);
3256 +
3257 +typedef int (*dm_fsys_downgrade_right_t)(
3258 + struct inode *ip,
3259 + dm_right_t right,
3260 + u_int type); /* DM_FSYS_OBJ or zero */
3261 +
3262 +typedef int (*dm_fsys_get_allocinfo_rvp_t)(
3263 + struct inode *ip,
3264 + dm_right_t right,
3265 + dm_off_t __user *offp,
3266 + u_int nelem,
3267 + dm_extent_t __user *extentp,
3268 + u_int __user *nelemp,
3269 + int *rvalp);
3270 +
3271 +typedef int (*dm_fsys_get_bulkall_rvp_t)(
3272 + struct inode *ip, /* root inode */
3273 + dm_right_t right,
3274 + u_int mask,
3275 + dm_attrname_t __user *attrnamep,
3276 + dm_attrloc_t __user *locp,
3277 + size_t buflen,
3278 + void __user *bufp,
3279 + size_t __user *rlenp,
3280 + int *rvalp);
3281 +
3282 +typedef int (*dm_fsys_get_bulkattr_rvp_t)(
3283 + struct inode *ip, /* root inode */
3284 + dm_right_t right,
3285 + u_int mask,
3286 + dm_attrloc_t __user *locp,
3287 + size_t buflen,
3288 + void __user *bufp,
3289 + size_t __user *rlenp,
3290 + int *rvalp);
3291 +
3292 +typedef int (*dm_fsys_get_config_t)(
3293 + struct inode *ip,
3294 + dm_right_t right,
3295 + dm_config_t flagname,
3296 + dm_size_t __user *retvalp);
3297 +
3298 +typedef int (*dm_fsys_get_config_events_t)(
3299 + struct inode *ip,
3300 + dm_right_t right,
3301 + u_int nelem,
3302 + dm_eventset_t __user *eventsetp,
3303 + u_int __user *nelemp);
3304 +
3305 +typedef int (*dm_fsys_get_destroy_dmattr_t)(
3306 + struct inode *ip,
3307 + dm_right_t right,
3308 + dm_attrname_t *attrnamep,
3309 + char **valuepp,
3310 + int *vlenp);
3311 +
3312 +typedef int (*dm_fsys_get_dioinfo_t)(
3313 + struct inode *ip,
3314 + dm_right_t right,
3315 + dm_dioinfo_t __user *diop);
3316 +
3317 +typedef int (*dm_fsys_get_dirattrs_rvp_t)(
3318 + struct inode *ip,
3319 + dm_right_t right,
3320 + u_int mask,
3321 + dm_attrloc_t __user *locp,
3322 + size_t buflen,
3323 + void __user *bufp,
3324 + size_t __user *rlenp,
3325 + int *rvalp);
3326 +
3327 +typedef int (*dm_fsys_get_dmattr_t)(
3328 + struct inode *ip,
3329 + dm_right_t right,
3330 + dm_attrname_t __user *attrnamep,
3331 + size_t buflen,
3332 + void __user *bufp,
3333 + size_t __user *rlenp);
3334 +
3335 +typedef int (*dm_fsys_get_eventlist_t)(
3336 + struct inode *ip,
3337 + dm_right_t right,
3338 + u_int type,
3339 + u_int nelem,
3340 + dm_eventset_t *eventsetp, /* in kernel space! */
3341 + u_int *nelemp); /* in kernel space! */
3342 +
3343 +typedef int (*dm_fsys_get_fileattr_t)(
3344 + struct inode *ip,
3345 + dm_right_t right,
3346 + u_int mask,
3347 + dm_stat_t __user *statp);
3348 +
3349 +typedef int (*dm_fsys_get_region_t)(
3350 + struct inode *ip,
3351 + dm_right_t right,
3352 + u_int nelem,
3353 + dm_region_t __user *regbufp,
3354 + u_int __user *nelemp);
3355 +
3356 +typedef int (*dm_fsys_getall_dmattr_t)(
3357 + struct inode *ip,
3358 + dm_right_t right,
3359 + size_t buflen,
3360 + void __user *bufp,
3361 + size_t __user *rlenp);
3362 +
3363 +typedef int (*dm_fsys_getall_inherit_t)(
3364 + struct inode *ip,
3365 + dm_right_t right,
3366 + u_int nelem,
3367 + dm_inherit_t __user *inheritbufp,
3368 + u_int __user *nelemp);
3369 +
3370 +typedef int (*dm_fsys_init_attrloc_t)(
3371 + struct inode *ip, /* sometimes root inode */
3372 + dm_right_t right,
3373 + dm_attrloc_t __user *locp);
3374 +
3375 +typedef int (*dm_fsys_mkdir_by_handle_t)(
3376 + struct inode *ip,
3377 + dm_right_t right,
3378 + void __user *hanp,
3379 + size_t hlen,
3380 + char __user *cname);
3381 +
3382 +typedef int (*dm_fsys_probe_hole_t)(
3383 + struct inode *ip,
3384 + dm_right_t right,
3385 + dm_off_t off,
3386 + dm_size_t len,
3387 + dm_off_t __user *roffp,
3388 + dm_size_t __user *rlenp);
3389 +
3390 +typedef int (*dm_fsys_punch_hole_t)(
3391 + struct inode *ip,
3392 + dm_right_t right,
3393 + dm_off_t off,
3394 + dm_size_t len);
3395 +
3396 +typedef int (*dm_fsys_read_invis_rvp_t)(
3397 + struct inode *ip,
3398 + dm_right_t right,
3399 + dm_off_t off,
3400 + dm_size_t len,
3401 + void __user *bufp,
3402 + int *rvp);
3403 +
3404 +typedef int (*dm_fsys_release_right_t)(
3405 + struct inode *ip,
3406 + dm_right_t right,
3407 + u_int type);
3408 +
3409 +typedef int (*dm_fsys_remove_dmattr_t)(
3410 + struct inode *ip,
3411 + dm_right_t right,
3412 + int setdtime,
3413 + dm_attrname_t __user *attrnamep);
3414 +
3415 +typedef int (*dm_fsys_request_right_t)(
3416 + struct inode *ip,
3417 + dm_right_t right,
3418 + u_int type, /* DM_FSYS_OBJ or zero */
3419 + u_int flags,
3420 + dm_right_t newright);
3421 +
3422 +typedef int (*dm_fsys_set_dmattr_t)(
3423 + struct inode *ip,
3424 + dm_right_t right,
3425 + dm_attrname_t __user *attrnamep,
3426 + int setdtime,
3427 + size_t buflen,
3428 + void __user *bufp);
3429 +
3430 +typedef int (*dm_fsys_set_eventlist_t)(
3431 + struct inode *ip,
3432 + dm_right_t right,
3433 + u_int type,
3434 + dm_eventset_t *eventsetp, /* in kernel space! */
3435 + u_int maxevent);
3436 +
3437 +typedef int (*dm_fsys_set_fileattr_t)(
3438 + struct inode *ip,
3439 + dm_right_t right,
3440 + u_int mask,
3441 + dm_fileattr_t __user *attrp);
3442 +
3443 +typedef int (*dm_fsys_set_inherit_t)(
3444 + struct inode *ip,
3445 + dm_right_t right,
3446 + dm_attrname_t __user *attrnamep,
3447 + mode_t mode);
3448 +
3449 +typedef int (*dm_fsys_set_region_t)(
3450 + struct inode *ip,
3451 + dm_right_t right,
3452 + u_int nelem,
3453 + dm_region_t __user *regbufp,
3454 + dm_boolean_t __user *exactflagp);
3455 +
3456 +typedef int (*dm_fsys_symlink_by_handle_t)(
3457 + struct inode *ip,
3458 + dm_right_t right,
3459 + void __user *hanp,
3460 + size_t hlen,
3461 + char __user *cname,
3462 + char __user *path);
3463 +
3464 +typedef int (*dm_fsys_sync_by_handle_t)(
3465 + struct inode *ip,
3466 + dm_right_t right);
3467 +
3468 +typedef int (*dm_fsys_upgrade_right_t)(
3469 + struct inode *ip,
3470 + dm_right_t right,
3471 + u_int type); /* DM_FSYS_OBJ or zero */
3472 +
3473 +typedef int (*dm_fsys_write_invis_rvp_t)(
3474 + struct inode *ip,
3475 + dm_right_t right,
3476 + int flags,
3477 + dm_off_t off,
3478 + dm_size_t len,
3479 + void __user *bufp,
3480 + int *rvp);
3481 +
3482 +typedef void (*dm_fsys_obj_ref_hold_t)(
3483 + struct inode *ip);
3484 +
3485 +
3486 +/* Structure definitions used by the VFS_DMAPI_FSYS_VECTOR call. */
3487 +
3488 +typedef struct {
3489 + dm_fsys_switch_t func_no; /* function number */
3490 + union {
3491 + dm_fsys_clear_inherit_t clear_inherit;
3492 + dm_fsys_create_by_handle_t create_by_handle;
3493 + dm_fsys_downgrade_right_t downgrade_right;
3494 + dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
3495 + dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
3496 + dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
3497 + dm_fsys_get_config_t get_config;
3498 + dm_fsys_get_config_events_t get_config_events;
3499 + dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
3500 + dm_fsys_get_dioinfo_t get_dioinfo;
3501 + dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
3502 + dm_fsys_get_dmattr_t get_dmattr;
3503 + dm_fsys_get_eventlist_t get_eventlist;
3504 + dm_fsys_get_fileattr_t get_fileattr;
3505 + dm_fsys_get_region_t get_region;
3506 + dm_fsys_getall_dmattr_t getall_dmattr;
3507 + dm_fsys_getall_inherit_t getall_inherit;
3508 + dm_fsys_init_attrloc_t init_attrloc;
3509 + dm_fsys_mkdir_by_handle_t mkdir_by_handle;
3510 + dm_fsys_probe_hole_t probe_hole;
3511 + dm_fsys_punch_hole_t punch_hole;
3512 + dm_fsys_read_invis_rvp_t read_invis_rvp;
3513 + dm_fsys_release_right_t release_right;
3514 + dm_fsys_remove_dmattr_t remove_dmattr;
3515 + dm_fsys_request_right_t request_right;
3516 + dm_fsys_set_dmattr_t set_dmattr;
3517 + dm_fsys_set_eventlist_t set_eventlist;
3518 + dm_fsys_set_fileattr_t set_fileattr;
3519 + dm_fsys_set_inherit_t set_inherit;
3520 + dm_fsys_set_region_t set_region;
3521 + dm_fsys_symlink_by_handle_t symlink_by_handle;
3522 + dm_fsys_sync_by_handle_t sync_by_handle;
3523 + dm_fsys_upgrade_right_t upgrade_right;
3524 + dm_fsys_write_invis_rvp_t write_invis_rvp;
3525 + dm_fsys_obj_ref_hold_t obj_ref_hold;
3526 + } u_fc;
3527 +} fsys_function_vector_t;
3528 +
3529 +struct dm_fcntl_vector {
3530 + int code_level;
3531 + int count; /* Number of functions in the vector */
3532 + fsys_function_vector_t *vecp;
3533 +};
3534 +typedef struct dm_fcntl_vector dm_fcntl_vector_t;
3535 +
3536 +struct dm_fcntl_mapevent {
3537 + size_t length; /* length of transfer */
3538 + dm_eventtype_t max_event; /* Maximum (WRITE or READ) event */
3539 + int error; /* returned error code */
3540 +};
3541 +typedef struct dm_fcntl_mapevent dm_fcntl_mapevent_t;
3542 +
3543 +#endif /* __KERNEL__ */
3544 +
3545 +
3546 +/* The following definitions are needed both by the kernel and by the
3547 + library routines.
3548 +*/
3549 +
3550 +#define DM_MAX_HANDLE_SIZE 56 /* maximum size for a file handle */
3551 +
3552 +
3553 +/*
3554 + * Opcodes for dmapi ioctl.
3555 + */
3556 +
3557 +#define DM_CLEAR_INHERIT 1
3558 +#define DM_CREATE_BY_HANDLE 2
3559 +#define DM_CREATE_SESSION 3
3560 +#define DM_CREATE_USEREVENT 4
3561 +#define DM_DESTROY_SESSION 5
3562 +#define DM_DOWNGRADE_RIGHT 6
3563 +#define DM_FD_TO_HANDLE 7
3564 +#define DM_FIND_EVENTMSG 8
3565 +#define DM_GET_ALLOCINFO 9
3566 +#define DM_GET_BULKALL 10
3567 +#define DM_GET_BULKATTR 11
3568 +#define DM_GET_CONFIG 12
3569 +#define DM_GET_CONFIG_EVENTS 13
3570 +#define DM_GET_DIOINFO 14
3571 +#define DM_GET_DIRATTRS 15
3572 +#define DM_GET_DMATTR 16
3573 +#define DM_GET_EVENTLIST 17
3574 +#define DM_GET_EVENTS 18
3575 +#define DM_GET_FILEATTR 19
3576 +#define DM_GET_MOUNTINFO 20
3577 +#define DM_GET_REGION 21
3578 +#define DM_GETALL_DISP 22
3579 +#define DM_GETALL_DMATTR 23
3580 +#define DM_GETALL_INHERIT 24
3581 +#define DM_GETALL_SESSIONS 25
3582 +#define DM_GETALL_TOKENS 26
3583 +#define DM_INIT_ATTRLOC 27
3584 +#define DM_MKDIR_BY_HANDLE 28
3585 +#define DM_MOVE_EVENT 29
3586 +#define DM_OBJ_REF_HOLD 30
3587 +#define DM_OBJ_REF_QUERY 31
3588 +#define DM_OBJ_REF_RELE 32
3589 +#define DM_PATH_TO_FSHANDLE 33
3590 +#define DM_PATH_TO_HANDLE 34
3591 +#define DM_PENDING 35
3592 +#define DM_PROBE_HOLE 36
3593 +#define DM_PUNCH_HOLE 37
3594 +#define DM_QUERY_RIGHT 38
3595 +#define DM_QUERY_SESSION 39
3596 +#define DM_READ_INVIS 40
3597 +#define DM_RELEASE_RIGHT 41
3598 +#define DM_REMOVE_DMATTR 42
3599 +#define DM_REQUEST_RIGHT 43
3600 +#define DM_RESPOND_EVENT 44
3601 +#define DM_SEND_MSG 45
3602 +#define DM_SET_DISP 46
3603 +#define DM_SET_DMATTR 47
3604 +#define DM_SET_EVENTLIST 48
3605 +#define DM_SET_FILEATTR 49
3606 +#define DM_SET_INHERIT 50
3607 +#define DM_SET_REGION 51
3608 +#define DM_SET_RETURN_ON_DESTROY 52
3609 +#define DM_SYMLINK_BY_HANDLE 53
3610 +#define DM_SYNC_BY_HANDLE 54
3611 +#define DM_UPGRADE_RIGHT 55
3612 +#define DM_WRITE_INVIS 56
3613 +#define DM_OPEN_BY_HANDLE 57
3614 +
3615 +#endif /* __DMAPI_KERN_H__ */
3616 Index: linux-2.6.26/fs/dmapi/dmapi_mountinfo.c
3617 ===================================================================
3618 --- /dev/null
3619 +++ linux-2.6.26/fs/dmapi/dmapi_mountinfo.c
3620 @@ -0,0 +1,527 @@
3621 +/*
3622 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3623 + *
3624 + * This program is free software; you can redistribute it and/or modify it
3625 + * under the terms of version 2 of the GNU General Public License as
3626 + * published by the Free Software Foundation.
3627 + *
3628 + * This program is distributed in the hope that it would be useful, but
3629 + * WITHOUT ANY WARRANTY; without even the implied warranty of
3630 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
3631 + *
3632 + * Further, this software is distributed without any warranty that it is
3633 + * free of the rightful claim of any third person regarding infringement
3634 + * or the like. Any license provided herein, whether implied or
3635 + * otherwise, applies only to this software file. Patent licenses, if
3636 + * any, provided herein do not apply to combinations of this program with
3637 + * other software, or any other product whatsoever.
3638 + *
3639 + * You should have received a copy of the GNU General Public License along
3640 + * with this program; if not, write the Free Software Foundation, Inc., 59
3641 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
3642 + *
3643 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
3644 + * Mountain View, CA 94043, or:
3645 + *
3646 + * http://www.sgi.com
3647 + *
3648 + * For further information regarding this notice, see:
3649 + *
3650 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
3651 + */
3652 +#include "dmapi.h"
3653 +#include "dmapi_kern.h"
3654 +#include "dmapi_private.h"
3655 +
3656 +static LIST_HEAD(dm_fsys_map);
3657 +static spinlock_t dm_fsys_lock = SPIN_LOCK_UNLOCKED;
3658 +
3659 +int
3660 +dm_code_level(void)
3661 +{
3662 + return DM_CLVL_XOPEN; /* initial X/Open compliant release */
3663 +}
3664 +
3665 +
3666 +/* Dummy routine which is stored in each function vector slot for which the
3667 + filesystem provides no function of its own. If an application calls the
3668 + function, he will just get ENOSYS.
3669 +*/
3670 +
3671 +static int
3672 +dm_enosys(void)
3673 +{
3674 + return -ENOSYS; /* function not supported by filesystem */
3675 +}
3676 +
3677 +
3678 +/* dm_query_fsys_for_vector() asks a filesystem for its list of supported
3679 + DMAPI functions, and builds a dm_vector_map_t structure based upon the
3680 + reply. We ignore functions supported by the filesystem which we do not
3681 + know about, and we substitute the subroutine 'dm_enosys' for each function
3682 + we know about but the filesystem does not support.
3683 +*/
3684 +
3685 +static void
3686 +dm_query_fsys_for_vector(
3687 + dm_vector_map_t *map)
3688 +{
3689 + struct super_block *sb = map->sb;
3690 + fsys_function_vector_t *vecp;
3691 + dm_fcntl_vector_t vecrq;
3692 + dm_fsys_vector_t *vptr;
3693 + struct filesystem_dmapi_operations *dmapiops = map->dmapiops;
3694 + int error;
3695 + int i;
3696 +
3697 +
3698 + /* Allocate a function vector and initialize all fields with a
3699 + dummy function that returns ENOSYS.
3700 + */
3701 +
3702 + vptr = map->vptr = kmem_cache_alloc(dm_fsys_vptr_cachep, GFP_KERNEL);
3703 + if (vptr == NULL) {
3704 + printk("%s/%d: kmem_cache_alloc(dm_fsys_vptr_cachep) returned NULL\n", __FUNCTION__, __LINE__);
3705 + return;
3706 + }
3707 +
3708 + vptr->code_level = 0;
3709 + vptr->clear_inherit = (dm_fsys_clear_inherit_t)dm_enosys;
3710 + vptr->create_by_handle = (dm_fsys_create_by_handle_t)dm_enosys;
3711 + vptr->downgrade_right = (dm_fsys_downgrade_right_t)dm_enosys;
3712 + vptr->get_allocinfo_rvp = (dm_fsys_get_allocinfo_rvp_t)dm_enosys;
3713 + vptr->get_bulkall_rvp = (dm_fsys_get_bulkall_rvp_t)dm_enosys;
3714 + vptr->get_bulkattr_rvp = (dm_fsys_get_bulkattr_rvp_t)dm_enosys;
3715 + vptr->get_config = (dm_fsys_get_config_t)dm_enosys;
3716 + vptr->get_config_events = (dm_fsys_get_config_events_t)dm_enosys;
3717 + vptr->get_destroy_dmattr = (dm_fsys_get_destroy_dmattr_t)dm_enosys;
3718 + vptr->get_dioinfo = (dm_fsys_get_dioinfo_t)dm_enosys;
3719 + vptr->get_dirattrs_rvp = (dm_fsys_get_dirattrs_rvp_t)dm_enosys;
3720 + vptr->get_dmattr = (dm_fsys_get_dmattr_t)dm_enosys;
3721 + vptr->get_eventlist = (dm_fsys_get_eventlist_t)dm_enosys;
3722 + vptr->get_fileattr = (dm_fsys_get_fileattr_t)dm_enosys;
3723 + vptr->get_region = (dm_fsys_get_region_t)dm_enosys;
3724 + vptr->getall_dmattr = (dm_fsys_getall_dmattr_t)dm_enosys;
3725 + vptr->getall_inherit = (dm_fsys_getall_inherit_t)dm_enosys;
3726 + vptr->init_attrloc = (dm_fsys_init_attrloc_t)dm_enosys;
3727 + vptr->mkdir_by_handle = (dm_fsys_mkdir_by_handle_t)dm_enosys;
3728 + vptr->probe_hole = (dm_fsys_probe_hole_t)dm_enosys;
3729 + vptr->punch_hole = (dm_fsys_punch_hole_t)dm_enosys;
3730 + vptr->read_invis_rvp = (dm_fsys_read_invis_rvp_t)dm_enosys;
3731 + vptr->release_right = (dm_fsys_release_right_t)dm_enosys;
3732 + vptr->request_right = (dm_fsys_request_right_t)dm_enosys;
3733 + vptr->remove_dmattr = (dm_fsys_remove_dmattr_t)dm_enosys;
3734 + vptr->set_dmattr = (dm_fsys_set_dmattr_t)dm_enosys;
3735 + vptr->set_eventlist = (dm_fsys_set_eventlist_t)dm_enosys;
3736 + vptr->set_fileattr = (dm_fsys_set_fileattr_t)dm_enosys;
3737 + vptr->set_inherit = (dm_fsys_set_inherit_t)dm_enosys;
3738 + vptr->set_region = (dm_fsys_set_region_t)dm_enosys;
3739 + vptr->symlink_by_handle = (dm_fsys_symlink_by_handle_t)dm_enosys;
3740 + vptr->sync_by_handle = (dm_fsys_sync_by_handle_t)dm_enosys;
3741 + vptr->upgrade_right = (dm_fsys_upgrade_right_t)dm_enosys;
3742 + vptr->write_invis_rvp = (dm_fsys_write_invis_rvp_t)dm_enosys;
3743 + vptr->obj_ref_hold = (dm_fsys_obj_ref_hold_t)dm_enosys;
3744 +
3745 + /* Issue a call to the filesystem in order to obtain
3746 + its vector of filesystem-specific DMAPI routines.
3747 + */
3748 +
3749 + vecrq.count = 0;
3750 + vecrq.vecp = NULL;
3751 +
3752 + error = -ENOSYS;
3753 + ASSERT(dmapiops);
3754 + if (dmapiops->get_fsys_vector)
3755 + error = dmapiops->get_fsys_vector(sb, (caddr_t)&vecrq);
3756 +
3757 + /* If we still have an error at this point, then the filesystem simply
3758 + does not support DMAPI, so we give up with all functions set to
3759 + ENOSYS.
3760 + */
3761 +
3762 + if (error || vecrq.count == 0) {
3763 + kmem_cache_free(dm_fsys_vptr_cachep, vptr);
3764 + map->vptr = NULL;
3765 + return;
3766 + }
3767 +
3768 + /* The request succeeded and we were given a vector which we need to
3769 + map to our current level. Overlay the dummy function with every
3770 + filesystem function we understand.
3771 + */
3772 +
3773 + vptr->code_level = vecrq.code_level;
3774 + vecp = vecrq.vecp;
3775 + for (i = 0; i < vecrq.count; i++) {
3776 + switch (vecp[i].func_no) {
3777 + case DM_FSYS_CLEAR_INHERIT:
3778 + vptr->clear_inherit = vecp[i].u_fc.clear_inherit;
3779 + break;
3780 + case DM_FSYS_CREATE_BY_HANDLE:
3781 + vptr->create_by_handle = vecp[i].u_fc.create_by_handle;
3782 + break;
3783 + case DM_FSYS_DOWNGRADE_RIGHT:
3784 + vptr->downgrade_right = vecp[i].u_fc.downgrade_right;
3785 + break;
3786 + case DM_FSYS_GET_ALLOCINFO_RVP:
3787 + vptr->get_allocinfo_rvp = vecp[i].u_fc.get_allocinfo_rvp;
3788 + break;
3789 + case DM_FSYS_GET_BULKALL_RVP:
3790 + vptr->get_bulkall_rvp = vecp[i].u_fc.get_bulkall_rvp;
3791 + break;
3792 + case DM_FSYS_GET_BULKATTR_RVP:
3793 + vptr->get_bulkattr_rvp = vecp[i].u_fc.get_bulkattr_rvp;
3794 + break;
3795 + case DM_FSYS_GET_CONFIG:
3796 + vptr->get_config = vecp[i].u_fc.get_config;
3797 + break;
3798 + case DM_FSYS_GET_CONFIG_EVENTS:
3799 + vptr->get_config_events = vecp[i].u_fc.get_config_events;
3800 + break;
3801 + case DM_FSYS_GET_DESTROY_DMATTR:
3802 + vptr->get_destroy_dmattr = vecp[i].u_fc.get_destroy_dmattr;
3803 + break;
3804 + case DM_FSYS_GET_DIOINFO:
3805 + vptr->get_dioinfo = vecp[i].u_fc.get_dioinfo;
3806 + break;
3807 + case DM_FSYS_GET_DIRATTRS_RVP:
3808 + vptr->get_dirattrs_rvp = vecp[i].u_fc.get_dirattrs_rvp;
3809 + break;
3810 + case DM_FSYS_GET_DMATTR:
3811 + vptr->get_dmattr = vecp[i].u_fc.get_dmattr;
3812 + break;
3813 + case DM_FSYS_GET_EVENTLIST:
3814 + vptr->get_eventlist = vecp[i].u_fc.get_eventlist;
3815 + break;
3816 + case DM_FSYS_GET_FILEATTR:
3817 + vptr->get_fileattr = vecp[i].u_fc.get_fileattr;
3818 + break;
3819 + case DM_FSYS_GET_REGION:
3820 + vptr->get_region = vecp[i].u_fc.get_region;
3821 + break;
3822 + case DM_FSYS_GETALL_DMATTR:
3823 + vptr->getall_dmattr = vecp[i].u_fc.getall_dmattr;
3824 + break;
3825 + case DM_FSYS_GETALL_INHERIT:
3826 + vptr->getall_inherit = vecp[i].u_fc.getall_inherit;
3827 + break;
3828 + case DM_FSYS_INIT_ATTRLOC:
3829 + vptr->init_attrloc = vecp[i].u_fc.init_attrloc;
3830 + break;
3831 + case DM_FSYS_MKDIR_BY_HANDLE:
3832 + vptr->mkdir_by_handle = vecp[i].u_fc.mkdir_by_handle;
3833 + break;
3834 + case DM_FSYS_PROBE_HOLE:
3835 + vptr->probe_hole = vecp[i].u_fc.probe_hole;
3836 + break;
3837 + case DM_FSYS_PUNCH_HOLE:
3838 + vptr->punch_hole = vecp[i].u_fc.punch_hole;
3839 + break;
3840 + case DM_FSYS_READ_INVIS_RVP:
3841 + vptr->read_invis_rvp = vecp[i].u_fc.read_invis_rvp;
3842 + break;
3843 + case DM_FSYS_RELEASE_RIGHT:
3844 + vptr->release_right = vecp[i].u_fc.release_right;
3845 + break;
3846 + case DM_FSYS_REMOVE_DMATTR:
3847 + vptr->remove_dmattr = vecp[i].u_fc.remove_dmattr;
3848 + break;
3849 + case DM_FSYS_REQUEST_RIGHT:
3850 + vptr->request_right = vecp[i].u_fc.request_right;
3851 + break;
3852 + case DM_FSYS_SET_DMATTR:
3853 + vptr->set_dmattr = vecp[i].u_fc.set_dmattr;
3854 + break;
3855 + case DM_FSYS_SET_EVENTLIST:
3856 + vptr->set_eventlist = vecp[i].u_fc.set_eventlist;
3857 + break;
3858 + case DM_FSYS_SET_FILEATTR:
3859 + vptr->set_fileattr = vecp[i].u_fc.set_fileattr;
3860 + break;
3861 + case DM_FSYS_SET_INHERIT:
3862 + vptr->set_inherit = vecp[i].u_fc.set_inherit;
3863 + break;
3864 + case DM_FSYS_SET_REGION:
3865 + vptr->set_region = vecp[i].u_fc.set_region;
3866 + break;
3867 + case DM_FSYS_SYMLINK_BY_HANDLE:
3868 + vptr->symlink_by_handle = vecp[i].u_fc.symlink_by_handle;
3869 + break;
3870 + case DM_FSYS_SYNC_BY_HANDLE:
3871 + vptr->sync_by_handle = vecp[i].u_fc.sync_by_handle;
3872 + break;
3873 + case DM_FSYS_UPGRADE_RIGHT:
3874 + vptr->upgrade_right = vecp[i].u_fc.upgrade_right;
3875 + break;
3876 + case DM_FSYS_WRITE_INVIS_RVP:
3877 + vptr->write_invis_rvp = vecp[i].u_fc.write_invis_rvp;
3878 + break;
3879 + case DM_FSYS_OBJ_REF_HOLD:
3880 + vptr->obj_ref_hold = vecp[i].u_fc.obj_ref_hold;
3881 + break;
3882 + default: /* ignore ones we don't understand */
3883 + break;
3884 + }
3885 + }
3886 +}
3887 +
3888 +
3889 +/* Must hold dm_fsys_lock.
3890 + * This returns the prototype for all instances of the fstype.
3891 + */
3892 +static dm_vector_map_t *
3893 +dm_fsys_map_by_fstype(
3894 + struct file_system_type *fstype)
3895 +{
3896 + struct list_head *p;
3897 + dm_vector_map_t *proto = NULL;
3898 + dm_vector_map_t *m;
3899 +
3900 + ASSERT_ALWAYS(fstype);
3901 + list_for_each(p, &dm_fsys_map) {
3902 + m = list_entry(p, dm_vector_map_t, ftype_list);
3903 + if (m->f_type == fstype) {
3904 + proto = m;
3905 + break;
3906 + }
3907 + }
3908 + return proto;
3909 +}
3910 +
3911 +
3912 +/* Must hold dm_fsys_lock */
3913 +static dm_vector_map_t *
3914 +dm_fsys_map_by_sb(
3915 + struct super_block *sb)
3916 +{
3917 + struct list_head *p;
3918 + dm_vector_map_t *proto;
3919 + dm_vector_map_t *m;
3920 + dm_vector_map_t *foundmap = NULL;
3921 +
3922 + proto = dm_fsys_map_by_fstype(sb->s_type);
3923 + if(proto == NULL) {
3924 + return NULL;
3925 + }
3926 +
3927 + list_for_each(p, &proto->sb_list) {
3928 + m = list_entry(p, dm_vector_map_t, sb_list);
3929 + if (m->sb == sb) {
3930 + foundmap = m;
3931 + break;
3932 + }
3933 + }
3934 + return foundmap;
3935 +}
3936 +
3937 +
3938 +#ifdef CONFIG_DMAPI_DEBUG
3939 +static void
3940 +sb_list(
3941 + struct super_block *sb)
3942 +{
3943 + struct list_head *p;
3944 + dm_vector_map_t *proto;
3945 + dm_vector_map_t *m;
3946 +
3947 + proto = dm_fsys_map_by_fstype(sb->s_type);
3948 + ASSERT(proto);
3949 +
3950 +printk("%s/%d: Current sb_list\n", __FUNCTION__, __LINE__);
3951 + list_for_each(p, &proto->sb_list) {
3952 + m = list_entry(p, dm_vector_map_t, sb_list);
3953 +printk("%s/%d: map 0x%p, sb 0x%p, vptr 0x%p, dmapiops 0x%p\n", __FUNCTION__, __LINE__, m, m->sb, m->vptr, m->dmapiops);
3954 + }
3955 +printk("%s/%d: Done sb_list\n", __FUNCTION__, __LINE__);
3956 +}
3957 +#else
3958 +#define sb_list(x)
3959 +#endif
3960 +
3961 +#ifdef CONFIG_DMAPI_DEBUG
3962 +static void
3963 +ftype_list(void)
3964 +{
3965 + struct list_head *p;
3966 + dm_vector_map_t *m;
3967 +
3968 +printk("%s/%d: Current ftype_list\n", __FUNCTION__, __LINE__);
3969 + list_for_each(p, &dm_fsys_map) {
3970 + m = list_entry(p, dm_vector_map_t, ftype_list);
3971 + printk("%s/%d: FS 0x%p, ftype 0x%p %s\n", __FUNCTION__, __LINE__, m, m->f_type, m->f_type->name);
3972 + }
3973 +printk("%s/%d: Done ftype_list\n", __FUNCTION__, __LINE__);
3974 +}
3975 +#else
3976 +#define ftype_list()
3977 +#endif
3978 +
3979 +/* Ask for vptr for this filesystem instance.
3980 + * The caller knows this inode is on a dmapi-managed filesystem.
3981 + */
3982 +dm_fsys_vector_t *
3983 +dm_fsys_vector(
3984 + struct inode *ip)
3985 +{
3986 + dm_vector_map_t *map;
3987 +
3988 + spin_lock(&dm_fsys_lock);
3989 + ftype_list();
3990 + map = dm_fsys_map_by_sb(ip->i_sb);
3991 + spin_unlock(&dm_fsys_lock);
3992 + ASSERT(map);
3993 + ASSERT(map->vptr);
3994 + return map->vptr;
3995 +}
3996 +
3997 +
3998 +/* Ask for the dmapiops for this filesystem instance. The caller is
3999 + * also asking if this is a dmapi-managed filesystem.
4000 + */
4001 +struct filesystem_dmapi_operations *
4002 +dm_fsys_ops(
4003 + struct super_block *sb)
4004 +{
4005 + dm_vector_map_t *proto = NULL;
4006 + dm_vector_map_t *map;
4007 +
4008 + spin_lock(&dm_fsys_lock);
4009 + ftype_list();
4010 + sb_list(sb);
4011 + map = dm_fsys_map_by_sb(sb);
4012 + if (map == NULL)
4013 + proto = dm_fsys_map_by_fstype(sb->s_type);
4014 + spin_unlock(&dm_fsys_lock);
4015 +
4016 + if ((map == NULL) && (proto == NULL))
4017 + return NULL;
4018 +
4019 + if (map == NULL) {
4020 + /* Find out if it's dmapi-managed */
4021 + dm_vector_map_t *m;
4022 +
4023 + ASSERT(proto);
4024 + m = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL);
4025 + if (m == NULL) {
4026 + printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) returned NULL\n", __FUNCTION__, __LINE__);
4027 + return NULL;
4028 + }
4029 + memset(m, 0, sizeof(*m));
4030 + m->dmapiops = proto->dmapiops;
4031 + m->f_type = sb->s_type;
4032 + m->sb = sb;
4033 + INIT_LIST_HEAD(&m->sb_list);
4034 + INIT_LIST_HEAD(&m->ftype_list);
4035 +
4036 + dm_query_fsys_for_vector(m);
4037 + if (m->vptr == NULL) {
4038 + /* This isn't dmapi-managed */
4039 + kmem_cache_free(dm_fsys_map_cachep, m);
4040 + return NULL;
4041 + }
4042 +
4043 + spin_lock(&dm_fsys_lock);
4044 + if ((map = dm_fsys_map_by_sb(sb)) == NULL)
4045 + list_add(&m->sb_list, &proto->sb_list);
4046 + spin_unlock(&dm_fsys_lock);
4047 +
4048 + if (map) {
4049 + kmem_cache_free(dm_fsys_vptr_cachep, m->vptr);
4050 + kmem_cache_free(dm_fsys_map_cachep, m);
4051 + }
4052 + else {
4053 + map = m;
4054 + }
4055 + }
4056 +
4057 + return map->dmapiops;
4058 +}
4059 +
4060 +
4061 +
4062 +/* Called when a filesystem instance is unregistered from dmapi */
4063 +void
4064 +dm_fsys_ops_release(
4065 + struct super_block *sb)
4066 +{
4067 + dm_vector_map_t *map;
4068 +
4069 + spin_lock(&dm_fsys_lock);
4070 + ASSERT(!list_empty(&dm_fsys_map));
4071 + map = dm_fsys_map_by_sb(sb);
4072 + ASSERT(map);
4073 + list_del(&map->sb_list);
4074 + spin_unlock(&dm_fsys_lock);
4075 +
4076 + ASSERT(map->vptr);
4077 + kmem_cache_free(dm_fsys_vptr_cachep, map->vptr);
4078 + kmem_cache_free(dm_fsys_map_cachep, map);
4079 +}
4080 +
4081 +
4082 +/* Called by a filesystem module that is loading into the kernel.
4083 + * This creates a new dm_vector_map_t which serves as the prototype
4084 + * for instances of this fstype and also provides the list_head
4085 + * for instances of this fstype. The prototypes are the only ones
4086 + * on the fstype_list, and will never be on the sb_list.
4087 + */
4088 +void
4089 +dmapi_register(
4090 + struct file_system_type *fstype,
4091 + struct filesystem_dmapi_operations *dmapiops)
4092 +{
4093 + dm_vector_map_t *proto;
4094 +
4095 + proto = kmem_cache_alloc(dm_fsys_map_cachep, GFP_KERNEL);
4096 + if (proto == NULL) {
4097 + printk("%s/%d: kmem_cache_alloc(dm_fsys_map_cachep) returned NULL\n", __FUNCTION__, __LINE__);
4098 + return;
4099 + }
4100 + memset(proto, 0, sizeof(*proto));
4101 + proto->dmapiops = dmapiops;
4102 + proto->f_type = fstype;
4103 + INIT_LIST_HEAD(&proto->sb_list);
4104 + INIT_LIST_HEAD(&proto->ftype_list);
4105 +
4106 + spin_lock(&dm_fsys_lock);
4107 + ASSERT(dm_fsys_map_by_fstype(fstype) == NULL);
4108 + list_add(&proto->ftype_list, &dm_fsys_map);
4109 + ftype_list();
4110 + spin_unlock(&dm_fsys_lock);
4111 +}
4112 +
4113 +/* Called by a filesystem module that is unloading from the kernel */
4114 +void
4115 +dmapi_unregister(
4116 + struct file_system_type *fstype)
4117 +{
4118 + struct list_head *p;
4119 + dm_vector_map_t *proto;
4120 + dm_vector_map_t *m;
4121 +
4122 + spin_lock(&dm_fsys_lock);
4123 + ASSERT(!list_empty(&dm_fsys_map));
4124 + proto = dm_fsys_map_by_fstype(fstype);
4125 + ASSERT(proto);
4126 + list_del(&proto->ftype_list);
4127 + spin_unlock(&dm_fsys_lock);
4128 +
4129 + p = &proto->sb_list;
4130 + while (!list_empty(p)) {
4131 + m = list_entry(p->next, dm_vector_map_t, sb_list);
4132 + list_del(&m->sb_list);
4133 + ASSERT(m->vptr);
4134 + kmem_cache_free(dm_fsys_vptr_cachep, m->vptr);
4135 + kmem_cache_free(dm_fsys_map_cachep, m);
4136 + }
4137 + kmem_cache_free(dm_fsys_map_cachep, proto);
4138 +}
4139 +
4140 +
4141 +int
4142 +dmapi_registered(
4143 + struct file_system_type *fstype,
4144 + struct filesystem_dmapi_operations **dmapiops)
4145 +{
4146 + return 0;
4147 +}
4148 Index: linux-2.6.26/fs/dmapi/dmapi_port.h
4149 ===================================================================
4150 --- /dev/null
4151 +++ linux-2.6.26/fs/dmapi/dmapi_port.h
4152 @@ -0,0 +1,138 @@
4153 +/*
4154 + * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
4155 + *
4156 + * This program is free software; you can redistribute it and/or modify it
4157 + * under the terms of version 2 of the GNU General Public License as
4158 + * published by the Free Software Foundation.
4159 + *
4160 + * This program is distributed in the hope that it would be useful, but
4161 + * WITHOUT ANY WARRANTY; without even the implied warranty of
4162 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
4163 + *
4164 + * Further, this software is distributed without any warranty that it is
4165 + * free of the rightful claim of any third person regarding infringement
4166 + * or the like. Any license provided herein, whether implied or
4167 + * otherwise, applies only to this software file. Patent licenses, if
4168 + * any, provided herein do not apply to combinations of this program with
4169 + * other software, or any other product whatsoever.
4170 + *
4171 + * You should have received a copy of the GNU General Public License along
4172 + * with this program; if not, write the Free Software Foundation, Inc., 59
4173 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
4174 + *
4175 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
4176 + * Mountain View, CA 94043, or:
4177 + *
4178 + * http://www.sgi.com
4179 + *
4180 + * For further information regarding this notice, see:
4181 + *
4182 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
4183 + */
4184 +#ifndef _DMAPI_PORT_H
4185 +#define _DMAPI_PORT_H
4186 +
4187 +#include <asm/div64.h>
4188 +#include "sv.h"
4189 +
4190 +#include <linux/sched.h> /* preempt needs this */
4191 +#include <linux/spinlock.h>
4192 +
4193 +typedef spinlock_t lock_t;
4194 +
4195 +#define spinlock_init(lock, name) spin_lock_init(lock)
4196 +#define spinlock_destroy(lock)
4197 +
4198 +#define mutex_spinlock(lock) ({ spin_lock(lock); 0; })
4199 +#define mutex_spinunlock(lock, s) spin_unlock(lock)
4200 +#define nested_spinlock(lock) spin_lock(lock)
4201 +#define nested_spinunlock(lock) spin_unlock(lock)
4202 +
4203 +typedef signed int __int32_t;
4204 +typedef unsigned int __uint32_t;
4205 +typedef signed long long int __int64_t;
4206 +typedef unsigned long long int __uint64_t;
4207 +
4208 +
4209 +/* __psint_t is the same size as a pointer */
4210 +#if (BITS_PER_LONG == 32)
4211 +typedef __int32_t __psint_t;
4212 +typedef __uint32_t __psunsigned_t;
4213 +#elif (BITS_PER_LONG == 64)
4214 +typedef __int64_t __psint_t;
4215 +typedef __uint64_t __psunsigned_t;
4216 +#else
4217 +#error BITS_PER_LONG must be 32 or 64
4218 +#endif
4219 +
4220 +static inline void
4221 +assfail(char *a, char *f, int l)
4222 +{
4223 + printk("DMAPI assertion failed: %s, file: %s, line: %d\n", a, f, l);
4224 + BUG();
4225 +}
4226 +
4227 +#ifdef DEBUG
4228 +#define doass 1
4229 +# ifdef lint
4230 +# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */
4231 +# else
4232 +# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__))
4233 +# endif /* lint */
4234 +#else
4235 +# define ASSERT(x) ((void)0)
4236 +#endif /* DEBUG */
4237 +
4238 +#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
4239 +
4240 +
4241 +#if defined __i386__
4242 +
4243 +/* Side effect free 64 bit mod operation */
4244 +static inline __u32 dmapi_do_mod(void *a, __u32 b, int n)
4245 +{
4246 + switch (n) {
4247 + case 4:
4248 + return *(__u32 *)a % b;
4249 + case 8:
4250 + {
4251 + unsigned long __upper, __low, __high, __mod;
4252 + __u64 c = *(__u64 *)a;
4253 + __upper = __high = c >> 32;
4254 + __low = c;
4255 + if (__high) {
4256 + __upper = __high % (b);
4257 + __high = __high / (b);
4258 + }
4259 + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
4260 + asm("":"=A" (c):"a" (__low),"d" (__high));
4261 + return __mod;
4262 + }
4263 + }
4264 +
4265 + /* NOTREACHED */
4266 + return 0;
4267 +}
4268 +#else
4269 +
4270 +/* Side effect free 64 bit mod operation */
4271 +static inline __u32 dmapi_do_mod(void *a, __u32 b, int n)
4272 +{
4273 + switch (n) {
4274 + case 4:
4275 + return *(__u32 *)a % b;
4276 + case 8:
4277 + {
4278 + __u64 c = *(__u64 *)a;
4279 + return do_div(c, b);
4280 + }
4281 + }
4282 +
4283 + /* NOTREACHED */
4284 + return 0;
4285 +}
4286 +#endif
4287 +
4288 +#define do_mod(a, b) dmapi_do_mod(&(a), (b), sizeof(a))
4289 +
4290 +#endif /* _DMAPI_PORT_H */
4291 Index: linux-2.6.26/fs/dmapi/dmapi_private.h
4292 ===================================================================
4293 --- /dev/null
4294 +++ linux-2.6.26/fs/dmapi/dmapi_private.h
4295 @@ -0,0 +1,619 @@
4296 +/*
4297 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
4298 + *
4299 + * This program is free software; you can redistribute it and/or modify it
4300 + * under the terms of version 2 of the GNU General Public License as
4301 + * published by the Free Software Foundation.
4302 + *
4303 + * This program is distributed in the hope that it would be useful, but
4304 + * WITHOUT ANY WARRANTY; without even the implied warranty of
4305 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
4306 + *
4307 + * Further, this software is distributed without any warranty that it is
4308 + * free of the rightful claim of any third person regarding infringement
4309 + * or the like. Any license provided herein, whether implied or
4310 + * otherwise, applies only to this software file. Patent licenses, if
4311 + * any, provided herein do not apply to combinations of this program with
4312 + * other software, or any other product whatsoever.
4313 + *
4314 + * You should have received a copy of the GNU General Public License along
4315 + * with this program; if not, write the Free Software Foundation, Inc., 59
4316 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
4317 + *
4318 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
4319 + * Mountain View, CA 94043, or:
4320 + *
4321 + * http://www.sgi.com
4322 + *
4323 + * For further information regarding this notice, see:
4324 + *
4325 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
4326 + */
4327 +#ifndef _DMAPI_PRIVATE_H
4328 +#define _DMAPI_PRIVATE_H
4329 +
4330 +#include <linux/slab.h>
4331 +#include "dmapi_port.h"
4332 +#include "sv.h"
4333 +
4334 +#ifdef CONFIG_PROC_FS
4335 +#define DMAPI_PROCFS "orig/fs/dmapi_v2" /* DMAPI device in /proc. */
4336 +#define DMAPI_DBG_PROCFS "orig/fs/dmapi_d" /* DMAPI debugging dir */
4337 +#endif
4338 +
4339 +extern struct kmem_cache *dm_fsreg_cachep;
4340 +extern struct kmem_cache *dm_tokdata_cachep;
4341 +extern struct kmem_cache *dm_session_cachep;
4342 +extern struct kmem_cache *dm_fsys_map_cachep;
4343 +extern struct kmem_cache *dm_fsys_vptr_cachep;
4344 +
4345 +typedef struct dm_tokdata {
4346 + struct dm_tokdata *td_next;
4347 + struct dm_tokevent *td_tevp; /* pointer to owning tevp */
4348 + int td_app_ref; /* # app threads currently active */
4349 + dm_right_t td_orig_right; /* original right held when created */
4350 + dm_right_t td_right; /* current right held for this handle */
4351 + short td_flags;
4352 + short td_type; /* object type */
4353 + int td_vcount; /* # of current application VN_HOLDs */
4354 + struct inode *td_ip; /* inode pointer */
4355 + dm_handle_t td_handle; /* handle for ip or sb */
4356 +} dm_tokdata_t;
4357 +
4358 +/* values for td_type */
4359 +
4360 +#define DM_TDT_NONE 0x00 /* td_handle is empty */
4361 +#define DM_TDT_VFS 0x01 /* td_handle points to a sb */
4362 +#define DM_TDT_REG 0x02 /* td_handle points to a file */
4363 +#define DM_TDT_DIR 0x04 /* td_handle points to a directory */
4364 +#define DM_TDT_LNK 0x08 /* td_handle points to a symlink */
4365 +#define DM_TDT_OTH 0x10 /* some other object eg. pipe, socket */
4366 +
4367 +#define DM_TDT_VNO (DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
4368 +#define DM_TDT_ANY (DM_TDT_VFS|DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
4369 +
4370 +/* values for td_flags */
4371 +
4372 +#define DM_TDF_ORIG 0x0001 /* part of the original event */
4373 +#define DM_TDF_EVTREF 0x0002 /* event thread holds inode reference */
4374 +#define DM_TDF_STHREAD 0x0004 /* only one app can use this handle */
4375 +#define DM_TDF_RIGHT 0x0008 /* vcount bumped for dm_request_right */
4376 +#define DM_TDF_HOLD 0x0010 /* vcount bumped for dm_obj_ref_hold */
4377 +
4378 +
4379 +/* Because some events contain __u64 fields, we force te_msg and te_event
4380 + to always be 8-byte aligned. In order to send more than one message in
4381 + a single dm_get_events() call, we also ensure that each message is an
4382 + 8-byte multiple.
4383 +*/
4384 +
4385 +typedef struct dm_tokevent {
4386 + struct dm_tokevent *te_next;
4387 + struct dm_tokevent *te_hashnext; /* hash chain */
4388 + lock_t te_lock; /* lock for all fields but te_*next.
4389 + * te_next and te_hashnext are
4390 + * protected by the session lock.
4391 + */
4392 + short te_flags;
4393 + short te_allocsize; /* alloc'ed size of this structure */
4394 + sv_t te_evt_queue; /* queue waiting for dm_respond_event */
4395 + sv_t te_app_queue; /* queue waiting for handle access */
4396 + int te_evt_ref; /* number of event procs using token */
4397 + int te_app_ref; /* number of app procs using token */
4398 + int te_app_slp; /* number of app procs sleeping */
4399 + int te_reply; /* return errno for sync messages */
4400 + dm_tokdata_t *te_tdp; /* list of handle/right pairs */
4401 + union {
4402 + __u64 align; /* force alignment of te_msg */
4403 + dm_eventmsg_t te_msg; /* user visible part */
4404 + } te_u;
4405 + __u64 te_event; /* start of dm_xxx_event_t message */
4406 +} dm_tokevent_t;
4407 +
4408 +#define te_msg te_u.te_msg
4409 +
4410 +/* values for te_flags */
4411 +
4412 +#define DM_TEF_LOCKED 0x0001 /* event "locked" by dm_get_events() */
4413 +#define DM_TEF_INTERMED 0x0002 /* a dm_pending reply was received */
4414 +#define DM_TEF_FINAL 0x0004 /* dm_respond_event has been received */
4415 +#define DM_TEF_HASHED 0x0010 /* event is on hash chain */
4416 +#define DM_TEF_FLUSH 0x0020 /* flushing threads from queues */
4417 +
4418 +
4419 +#ifdef CONFIG_DMAPI_DEBUG
4420 +#define DM_SHASH_DEBUG
4421 +#endif
4422 +
4423 +typedef struct dm_sesshash {
4424 + dm_tokevent_t *h_next; /* ptr to chain of tokevents */
4425 +#ifdef DM_SHASH_DEBUG
4426 + int maxlength;
4427 + int curlength;
4428 + int num_adds;
4429 + int num_dels;
4430 + int dup_hits;
4431 +#endif
4432 +} dm_sesshash_t;
4433 +
4434 +
4435 +typedef struct dm_eventq {
4436 + dm_tokevent_t *eq_head;
4437 + dm_tokevent_t *eq_tail;
4438 + int eq_count; /* size of queue */
4439 +} dm_eventq_t;
4440 +
4441 +
4442 +typedef struct dm_session {
4443 + struct dm_session *sn_next; /* sessions linkage */
4444 + dm_sessid_t sn_sessid; /* user-visible session number */
4445 + u_int sn_flags;
4446 + lock_t sn_qlock; /* lock for newq/delq related fields */
4447 + sv_t sn_readerq; /* waiting for message on sn_newq */
4448 + sv_t sn_writerq; /* waiting for room on sn_newq */
4449 + u_int sn_readercnt; /* count of waiting readers */
4450 + u_int sn_writercnt; /* count of waiting writers */
4451 + dm_eventq_t sn_newq; /* undelivered event queue */
4452 + dm_eventq_t sn_delq; /* delivered event queue */
4453 + dm_eventq_t sn_evt_writerq; /* events of thrds in sn_writerq */
4454 + dm_sesshash_t *sn_sesshash; /* buckets for tokevent hash chains */
4455 +#ifdef DM_SHASH_DEBUG
4456 + int sn_buckets_in_use;
4457 + int sn_max_buckets_in_use;
4458 +#endif
4459 + char sn_info[DM_SESSION_INFO_LEN]; /* user-supplied info */
4460 +} dm_session_t;
4461 +
4462 +/* values for sn_flags */
4463 +
4464 +#define DM_SN_WANTMOUNT 0x0001 /* session wants to get mount events */
4465 +
4466 +
4467 +typedef enum {
4468 + DM_STATE_MOUNTING,
4469 + DM_STATE_MOUNTED,
4470 + DM_STATE_UNMOUNTING,
4471 + DM_STATE_UNMOUNTED
4472 +} dm_fsstate_t;
4473 +
4474 +
4475 +typedef struct dm_fsreg {
4476 + struct dm_fsreg *fr_next;
4477 + struct super_block *fr_sb; /* filesystem pointer */
4478 + dm_tokevent_t *fr_tevp;
4479 + dm_fsid_t fr_fsid; /* filesystem ID */
4480 + void *fr_msg; /* dm_mount_event_t for filesystem */
4481 + int fr_msgsize; /* size of dm_mount_event_t */
4482 + dm_fsstate_t fr_state;
4483 + sv_t fr_dispq;
4484 + int fr_dispcnt;
4485 + dm_eventq_t fr_evt_dispq; /* events of thrds in fr_dispq */
4486 + sv_t fr_queue; /* queue for hdlcnt/sbcnt/unmount */
4487 + lock_t fr_lock;
4488 + int fr_hdlcnt; /* threads blocked during unmount */
4489 + int fr_vfscnt; /* threads in VFS_VGET or VFS_ROOT */
4490 + int fr_unmount; /* if non-zero, umount is sleeping */
4491 + dm_attrname_t fr_rattr; /* dm_set_return_on_destroy attribute */
4492 + dm_session_t *fr_sessp [DM_EVENT_MAX];
4493 +} dm_fsreg_t;
4494 +
4495 +
4496 +
4497 +
4498 +/* events valid in dm_set_disp() when called with a filesystem handle. */
4499 +
4500 +#define DM_VALID_DISP_EVENTS ( \
4501 + (1 << DM_EVENT_PREUNMOUNT) | \
4502 + (1 << DM_EVENT_UNMOUNT) | \
4503 + (1 << DM_EVENT_NOSPACE) | \
4504 + (1 << DM_EVENT_DEBUT) | \
4505 + (1 << DM_EVENT_CREATE) | \
4506 + (1 << DM_EVENT_POSTCREATE) | \
4507 + (1 << DM_EVENT_REMOVE) | \
4508 + (1 << DM_EVENT_POSTREMOVE) | \
4509 + (1 << DM_EVENT_RENAME) | \
4510 + (1 << DM_EVENT_POSTRENAME) | \
4511 + (1 << DM_EVENT_LINK) | \
4512 + (1 << DM_EVENT_POSTLINK) | \
4513 + (1 << DM_EVENT_SYMLINK) | \
4514 + (1 << DM_EVENT_POSTSYMLINK) | \
4515 + (1 << DM_EVENT_READ) | \
4516 + (1 << DM_EVENT_WRITE) | \
4517 + (1 << DM_EVENT_TRUNCATE) | \
4518 + (1 << DM_EVENT_ATTRIBUTE) | \
4519 + (1 << DM_EVENT_DESTROY) )
4520 +
4521 +
4522 +/* isolate the read/write/trunc events of a dm_tokevent_t */
4523 +
4524 +#define DM_EVENT_RDWRTRUNC(tevp) ( \
4525 + ((tevp)->te_msg.ev_type == DM_EVENT_READ) || \
4526 + ((tevp)->te_msg.ev_type == DM_EVENT_WRITE) || \
4527 + ((tevp)->te_msg.ev_type == DM_EVENT_TRUNCATE) )
4528 +
4529 +
4530 +/*
4531 + * Global handle hack isolation.
4532 + */
4533 +
4534 +#define DM_GLOBALHAN(hanp, hlen) (((hanp) == DM_GLOBAL_HANP) && \
4535 + ((hlen) == DM_GLOBAL_HLEN))
4536 +
4537 +
4538 +#define DM_MAX_MSG_DATA 3960
4539 +
4540 +
4541 +
4542 +/* Supported filesystem function vector functions. */
4543 +
4544 +
4545 +typedef struct {
4546 + int code_level;
4547 + dm_fsys_clear_inherit_t clear_inherit;
4548 + dm_fsys_create_by_handle_t create_by_handle;
4549 + dm_fsys_downgrade_right_t downgrade_right;
4550 + dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
4551 + dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
4552 + dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
4553 + dm_fsys_get_config_t get_config;
4554 + dm_fsys_get_config_events_t get_config_events;
4555 + dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
4556 + dm_fsys_get_dioinfo_t get_dioinfo;
4557 + dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
4558 + dm_fsys_get_dmattr_t get_dmattr;
4559 + dm_fsys_get_eventlist_t get_eventlist;
4560 + dm_fsys_get_fileattr_t get_fileattr;
4561 + dm_fsys_get_region_t get_region;
4562 + dm_fsys_getall_dmattr_t getall_dmattr;
4563 + dm_fsys_getall_inherit_t getall_inherit;
4564 + dm_fsys_init_attrloc_t init_attrloc;
4565 + dm_fsys_mkdir_by_handle_t mkdir_by_handle;
4566 + dm_fsys_probe_hole_t probe_hole;
4567 + dm_fsys_punch_hole_t punch_hole;
4568 + dm_fsys_read_invis_rvp_t read_invis_rvp;
4569 + dm_fsys_release_right_t release_right;
4570 + dm_fsys_remove_dmattr_t remove_dmattr;
4571 + dm_fsys_request_right_t request_right;
4572 + dm_fsys_set_dmattr_t set_dmattr;
4573 + dm_fsys_set_eventlist_t set_eventlist;
4574 + dm_fsys_set_fileattr_t set_fileattr;
4575 + dm_fsys_set_inherit_t set_inherit;
4576 + dm_fsys_set_region_t set_region;
4577 + dm_fsys_symlink_by_handle_t symlink_by_handle;
4578 + dm_fsys_sync_by_handle_t sync_by_handle;
4579 + dm_fsys_upgrade_right_t upgrade_right;
4580 + dm_fsys_write_invis_rvp_t write_invis_rvp;
4581 + dm_fsys_obj_ref_hold_t obj_ref_hold;
4582 +} dm_fsys_vector_t;
4583 +
4584 +
4585 +typedef struct {
4586 + struct list_head ftype_list; /* list of fstypes */
4587 + struct list_head sb_list; /* list of sb's per fstype */
4588 + struct file_system_type *f_type;
4589 + struct filesystem_dmapi_operations *dmapiops;
4590 + dm_fsys_vector_t *vptr;
4591 + struct super_block *sb;
4592 +} dm_vector_map_t;
4593 +
4594 +
4595 +extern dm_session_t *dm_sessions; /* head of session list */
4596 +extern dm_fsreg_t *dm_registers;
4597 +extern lock_t dm_reg_lock; /* lock for registration list */
4598 +
4599 +/*
4600 + * Kernel only prototypes.
4601 + */
4602 +
4603 +int dm_find_session_and_lock(
4604 + dm_sessid_t sid,
4605 + dm_session_t **sessionpp,
4606 + unsigned long *lcp);
4607 +
4608 +int dm_find_msg_and_lock(
4609 + dm_sessid_t sid,
4610 + dm_token_t token,
4611 + dm_tokevent_t **tevpp,
4612 + unsigned long *lcp);
4613 +
4614 +dm_tokevent_t * dm_evt_create_tevp(
4615 + dm_eventtype_t event,
4616 + int variable_size,
4617 + void **msgpp);
4618 +
4619 +int dm_app_get_tdp(
4620 + dm_sessid_t sid,
4621 + void __user *hanp,
4622 + size_t hlen,
4623 + dm_token_t token,
4624 + short types,
4625 + dm_right_t right,
4626 + dm_tokdata_t **tdpp);
4627 +
4628 +int dm_get_config_tdp(
4629 + void __user *hanp,
4630 + size_t hlen,
4631 + dm_tokdata_t **tdpp);
4632 +
4633 +void dm_app_put_tdp(
4634 + dm_tokdata_t *tdp);
4635 +
4636 +void dm_put_tevp(
4637 + dm_tokevent_t *tevp,
4638 + dm_tokdata_t *tdp);
4639 +
4640 +void dm_evt_rele_tevp(
4641 + dm_tokevent_t *tevp,
4642 + int droprights);
4643 +
4644 +int dm_enqueue_normal_event(
4645 + struct super_block *sbp,
4646 + dm_tokevent_t **tevpp,
4647 + int flags);
4648 +
4649 +int dm_enqueue_mount_event(
4650 + struct super_block *sbp,
4651 + dm_tokevent_t *tevp);
4652 +
4653 +int dm_enqueue_sendmsg_event(
4654 + dm_sessid_t targetsid,
4655 + dm_tokevent_t *tevp,
4656 + int synch);
4657 +
4658 +int dm_enqueue_user_event(
4659 + dm_sessid_t sid,
4660 + dm_tokevent_t *tevp,
4661 + dm_token_t *tokenp);
4662 +
4663 +int dm_obj_ref_query_rvp(
4664 + dm_sessid_t sid,
4665 + dm_token_t token,
4666 + void __user *hanp,
4667 + size_t hlen,
4668 + int *rvp);
4669 +
4670 +int dm_read_invis_rvp(
4671 + dm_sessid_t sid,
4672 + void __user *hanp,
4673 + size_t hlen,
4674 + dm_token_t token,
4675 + dm_off_t off,
4676 + dm_size_t len,
4677 + void __user *bufp,
4678 + int *rvp);
4679 +
4680 +int dm_write_invis_rvp(
4681 + dm_sessid_t sid,
4682 + void __user *hanp,
4683 + size_t hlen,
4684 + dm_token_t token,
4685 + int flags,
4686 + dm_off_t off,
4687 + dm_size_t len,
4688 + void __user *bufp,
4689 + int *rvp);
4690 +
4691 +int dm_get_bulkattr_rvp(
4692 + dm_sessid_t sid,
4693 + void __user *hanp,
4694 + size_t hlen,
4695 + dm_token_t token,
4696 + u_int mask,
4697 + dm_attrloc_t __user *locp,
4698 + size_t buflen,
4699 + void __user *bufp,
4700 + size_t __user *rlenp,
4701 + int *rvp);
4702 +
4703 +int dm_get_bulkall_rvp(
4704 + dm_sessid_t sid,
4705 + void __user *hanp,
4706 + size_t hlen,
4707 + dm_token_t token,
4708 + u_int mask,
4709 + dm_attrname_t __user *attrnamep,
4710 + dm_attrloc_t __user *locp,
4711 + size_t buflen,
4712 + void __user *bufp,
4713 + size_t __user *rlenp,
4714 + int *rvp);
4715 +
4716 +int dm_get_dirattrs_rvp(
4717 + dm_sessid_t sid,
4718 + void __user *hanp,
4719 + size_t hlen,
4720 + dm_token_t token,
4721 + u_int mask,
4722 + dm_attrloc_t __user *locp,
4723 + size_t buflen,
4724 + void __user *bufp,
4725 + size_t __user *rlenp,
4726 + int *rvp);
4727 +
4728 +int dm_get_allocinfo_rvp(
4729 + dm_sessid_t sid,
4730 + void __user *hanp,
4731 + size_t hlen,
4732 + dm_token_t token,
4733 + dm_off_t __user *offp,
4734 + u_int nelem,
4735 + dm_extent_t __user *extentp,
4736 + u_int __user *nelemp,
4737 + int *rvp);
4738 +
4739 +int dm_waitfor_destroy_attrname(
4740 + struct super_block *sb,
4741 + dm_attrname_t *attrnamep);
4742 +
4743 +void dm_clear_fsreg(
4744 + dm_session_t *s);
4745 +
4746 +int dm_add_fsys_entry(
4747 + struct super_block *sb,
4748 + dm_tokevent_t *tevp);
4749 +
4750 +void dm_change_fsys_entry(
4751 + struct super_block *sb,
4752 + dm_fsstate_t newstate);
4753 +
4754 +void dm_remove_fsys_entry(
4755 + struct super_block *sb);
4756 +
4757 +dm_fsys_vector_t *dm_fsys_vector(
4758 + struct inode *ip);
4759 +
4760 +struct filesystem_dmapi_operations *dm_fsys_ops(
4761 + struct super_block *sb);
4762 +
4763 +void dm_fsys_ops_release(
4764 + struct super_block *sb);
4765 +
4766 +int dm_waitfor_disp_session(
4767 + struct super_block *sb,
4768 + dm_tokevent_t *tevp,
4769 + dm_session_t **sessionpp,
4770 + unsigned long *lcp);
4771 +
4772 +struct inode * dm_handle_to_ip (
4773 + dm_handle_t *handlep,
4774 + short *typep);
4775 +
4776 +int dm_check_dmapi_ip(
4777 + struct inode *ip);
4778 +
4779 +dm_tokevent_t * dm_find_mount_tevp_and_lock(
4780 + dm_fsid_t *fsidp,
4781 + unsigned long *lcp);
4782 +
4783 +int dm_path_to_hdl(
4784 + char __user *path,
4785 + void __user *hanp,
4786 + size_t __user *hlenp);
4787 +
4788 +int dm_path_to_fshdl(
4789 + char __user *path,
4790 + void __user *hanp,
4791 + size_t __user *hlenp);
4792 +
4793 +int dm_fd_to_hdl(
4794 + int fd,
4795 + void __user *hanp,
4796 + size_t __user *hlenp);
4797 +
4798 +int dm_upgrade_right(
4799 + dm_sessid_t sid,
4800 + void __user *hanp,
4801 + size_t hlen,
4802 + dm_token_t token);
4803 +
4804 +int dm_downgrade_right(
4805 + dm_sessid_t sid,
4806 + void __user *hanp,
4807 + size_t hlen,
4808 + dm_token_t token);
4809 +
4810 +int dm_request_right(
4811 + dm_sessid_t sid,
4812 + void __user *hanp,
4813 + size_t hlen,
4814 + dm_token_t token,
4815 + u_int flags,
4816 + dm_right_t right);
4817 +
4818 +int dm_release_right(
4819 + dm_sessid_t sid,
4820 + void __user *hanp,
4821 + size_t hlen,
4822 + dm_token_t token);
4823 +
4824 +int dm_query_right(
4825 + dm_sessid_t sid,
4826 + void __user *hanp,
4827 + size_t hlen,
4828 + dm_token_t token,
4829 + dm_right_t __user *rightp);
4830 +
4831 +
4832 +int dm_set_eventlist(
4833 + dm_sessid_t sid,
4834 + void __user *hanp,
4835 + size_t hlen,
4836 + dm_token_t token,
4837 + dm_eventset_t __user *eventsetp,
4838 + u_int maxevent);
4839 +
4840 +int dm_obj_ref_hold(
4841 + dm_sessid_t sid,
4842 + dm_token_t token,
4843 + void __user *hanp,
4844 + size_t hlen);
4845 +
4846 +int dm_obj_ref_rele(
4847 + dm_sessid_t sid,
4848 + dm_token_t token,
4849 + void __user *hanp,
4850 + size_t hlen);
4851 +
4852 +int dm_get_eventlist(
4853 + dm_sessid_t sid,
4854 + void __user *hanp,
4855 + size_t hlen,
4856 + dm_token_t token,
4857 + u_int nelem,
4858 + dm_eventset_t __user *eventsetp,
4859 + u_int __user *nelemp);
4860 +
4861 +
4862 +int dm_set_disp(
4863 + dm_sessid_t sid,
4864 + void __user *hanp,
4865 + size_t hlen,
4866 + dm_token_t token,
4867 + dm_eventset_t __user *eventsetp,
4868 + u_int maxevent);
4869 +
4870 +
4871 +int dm_set_return_on_destroy(
4872 + dm_sessid_t sid,
4873 + void __user *hanp,
4874 + size_t hlen,
4875 + dm_token_t token,
4876 + dm_attrname_t __user *attrnamep,
4877 + dm_boolean_t enable);
4878 +
4879 +
4880 +int dm_get_mountinfo(
4881 + dm_sessid_t sid,
4882 + void __user *hanp,
4883 + size_t hlen,
4884 + dm_token_t token,
4885 + size_t buflen,
4886 + void __user *bufp,
4887 + size_t __user *rlenp);
4888 +
4889 +void dm_link_event(
4890 + dm_tokevent_t *tevp,
4891 + dm_eventq_t *queue);
4892 +
4893 +void dm_unlink_event(
4894 + dm_tokevent_t *tevp,
4895 + dm_eventq_t *queue);
4896 +
4897 +int dm_open_by_handle_rvp(
4898 + unsigned int fd,
4899 + void __user *hanp,
4900 + size_t hlen,
4901 + int mode,
4902 + int *rvp);
4903 +
4904 +int dm_copyin_handle(
4905 + void __user *hanp,
4906 + size_t hlen,
4907 + dm_handle_t *handlep);
4908 +
4909 +int dm_release_disp_threads(
4910 + dm_fsid_t *fsid,
4911 + struct inode *inode,
4912 + int errno);
4913 +
4914 +#endif /* _DMAPI_PRIVATE_H */
4915 Index: linux-2.6.26/fs/dmapi/dmapi_region.c
4916 ===================================================================
4917 --- /dev/null
4918 +++ linux-2.6.26/fs/dmapi/dmapi_region.c
4919 @@ -0,0 +1,91 @@
4920 +/*
4921 + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
4922 + *
4923 + * This program is free software; you can redistribute it and/or modify it
4924 + * under the terms of version 2 of the GNU General Public License as
4925 + * published by the Free Software Foundation.
4926 + *
4927 + * This program is distributed in the hope that it would be useful, but
4928 + * WITHOUT ANY WARRANTY; without even the implied warranty of
4929 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
4930 + *
4931 + * Further, this software is distributed without any warranty that it is
4932 + * free of the rightful claim of any third person regarding infringement
4933 + * or the like. Any license provided herein, whether implied or
4934 + * otherwise, applies only to this software file. Patent licenses, if
4935 + * any, provided herein do not apply to combinations of this program with
4936 + * other software, or any other product whatsoever.
4937 + *
4938 + * You should have received a copy of the GNU General Public License along
4939 + * with this program; if not, write the Free Software Foundation, Inc., 59
4940 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
4941 + *
4942 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
4943 + * Mountain View, CA 94043, or:
4944 + *
4945 + * http://www.sgi.com
4946 + *
4947 + * For further information regarding this notice, see:
4948 + *
4949 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
4950 + */
4951 +#include "dmapi.h"
4952 +#include "dmapi_kern.h"
4953 +#include "dmapi_private.h"
4954 +
4955 +
4956 +int
4957 +dm_get_region(
4958 + dm_sessid_t sid,
4959 + void __user *hanp,
4960 + size_t hlen,
4961 + dm_token_t token,
4962 + u_int nelem,
4963 + dm_region_t __user *regbufp,
4964 + u_int __user *nelemp)
4965 +{
4966 + dm_fsys_vector_t *fsys_vector;
4967 + dm_tokdata_t *tdp;
4968 + int error;
4969 +
4970 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
4971 + DM_RIGHT_SHARED, &tdp);
4972 + if (error != 0)
4973 + return(error);
4974 +
4975 + fsys_vector = dm_fsys_vector(tdp->td_ip);
4976 + error = fsys_vector->get_region(tdp->td_ip, tdp->td_right,
4977 + nelem, regbufp, nelemp);
4978 +
4979 + dm_app_put_tdp(tdp);
4980 + return(error);
4981 +}
4982 +
4983 +
4984 +
4985 +int
4986 +dm_set_region(
4987 + dm_sessid_t sid,
4988 + void __user *hanp,
4989 + size_t hlen,
4990 + dm_token_t token,
4991 + u_int nelem,
4992 + dm_region_t __user *regbufp,
4993 + dm_boolean_t __user *exactflagp)
4994 +{
4995 + dm_fsys_vector_t *fsys_vector;
4996 + dm_tokdata_t *tdp;
4997 + int error;
4998 +
4999 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
5000 + DM_RIGHT_EXCL, &tdp);
5001 + if (error != 0)
5002 + return(error);
5003 +
5004 + fsys_vector = dm_fsys_vector(tdp->td_ip);
5005 + error = fsys_vector->set_region(tdp->td_ip, tdp->td_right,
5006 + nelem, regbufp, exactflagp);
5007 +
5008 + dm_app_put_tdp(tdp);
5009 + return(error);
5010 +}
5011 Index: linux-2.6.26/fs/dmapi/dmapi_register.c
5012 ===================================================================
5013 --- /dev/null
5014 +++ linux-2.6.26/fs/dmapi/dmapi_register.c
5015 @@ -0,0 +1,1644 @@
5016 +/*
5017 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
5018 + *
5019 + * This program is free software; you can redistribute it and/or modify it
5020 + * under the terms of version 2 of the GNU General Public License as
5021 + * published by the Free Software Foundation.
5022 + *
5023 + * This program is distributed in the hope that it would be useful, but
5024 + * WITHOUT ANY WARRANTY; without even the implied warranty of
5025 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
5026 + *
5027 + * Further, this software is distributed without any warranty that it is
5028 + * free of the rightful claim of any third person regarding infringement
5029 + * or the like. Any license provided herein, whether implied or
5030 + * otherwise, applies only to this software file. Patent licenses, if
5031 + * any, provided herein do not apply to combinations of this program with
5032 + * other software, or any other product whatsoever.
5033 + *
5034 + * You should have received a copy of the GNU General Public License along
5035 + * with this program; if not, write the Free Software Foundation, Inc., 59
5036 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
5037 + *
5038 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
5039 + * Mountain View, CA 94043, or:
5040 + *
5041 + * http://www.sgi.com
5042 + *
5043 + * For further information regarding this notice, see:
5044 + *
5045 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
5046 + */
5047 +
5048 +#include <linux/version.h>
5049 +#include <linux/mm.h>
5050 +#include <linux/proc_fs.h>
5051 +#include <linux/module.h>
5052 +#include <linux/mount.h>
5053 +#include <linux/namei.h>
5054 +#include <asm/uaccess.h>
5055 +#include <linux/fs.h>
5056 +#include <linux/file.h>
5057 +#include "dmapi.h"
5058 +#include "dmapi_kern.h"
5059 +#include "dmapi_private.h"
5060 +
5061 +/* LOOKUP_POSITIVE was removed in Linux 2.6 */
5062 +#ifndef LOOKUP_POSITIVE
5063 +#define LOOKUP_POSITIVE 0
5064 +#endif
5065 +
5066 +dm_fsreg_t *dm_registers; /* head of filesystem registration list */
5067 +int dm_fsys_cnt; /* number of filesystems on dm_registers list */
5068 +lock_t dm_reg_lock = SPIN_LOCK_UNLOCKED;/* lock for dm_registers */
5069 +
5070 +
5071 +
5072 +#ifdef CONFIG_PROC_FS
5073 +static int
5074 +fsreg_read_pfs(char *buffer, char **start, off_t offset,
5075 + int count, int *eof, void *data)
5076 +{
5077 + int len;
5078 + int i;
5079 + dm_fsreg_t *fsrp = (dm_fsreg_t*)data;
5080 + char statebuf[30];
5081 +
5082 +#define CHKFULL if(len >= count) break;
5083 +#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
5084 +
5085 + switch (fsrp->fr_state) {
5086 + case DM_STATE_MOUNTING: sprintf(statebuf, "mounting"); break;
5087 + case DM_STATE_MOUNTED: sprintf(statebuf, "mounted"); break;
5088 + case DM_STATE_UNMOUNTING: sprintf(statebuf, "unmounting"); break;
5089 + case DM_STATE_UNMOUNTED: sprintf(statebuf, "unmounted"); break;
5090 + default:
5091 + sprintf(statebuf, "unknown:%d", (int)fsrp->fr_state);
5092 + break;
5093 + }
5094 +
5095 + len=0;
5096 + while(1){
5097 + ADDBUF("fsrp=0x%p\n", fsrp);
5098 + ADDBUF("fr_next=0x%p\n", fsrp->fr_next);
5099 + ADDBUF("fr_sb=0x%p\n", fsrp->fr_sb);
5100 + ADDBUF("fr_tevp=0x%p\n", fsrp->fr_tevp);
5101 + ADDBUF("fr_fsid=%c\n", '?');
5102 + ADDBUF("fr_msg=0x%p\n", fsrp->fr_msg);
5103 + ADDBUF("fr_msgsize=%d\n", fsrp->fr_msgsize);
5104 + ADDBUF("fr_state=%s\n", statebuf);
5105 + ADDBUF("fr_dispq=%c\n", '?');
5106 + ADDBUF("fr_dispcnt=%d\n", fsrp->fr_dispcnt);
5107 +
5108 + ADDBUF("fr_evt_dispq.eq_head=0x%p\n", fsrp->fr_evt_dispq.eq_head);
5109 + ADDBUF("fr_evt_dispq.eq_tail=0x%p\n", fsrp->fr_evt_dispq.eq_tail);
5110 + ADDBUF("fr_evt_dispq.eq_count=%d\n", fsrp->fr_evt_dispq.eq_count);
5111 +
5112 + ADDBUF("fr_queue=%c\n", '?');
5113 + ADDBUF("fr_lock=%c\n", '?');
5114 + ADDBUF("fr_hdlcnt=%d\n", fsrp->fr_hdlcnt);
5115 + ADDBUF("fr_vfscnt=%d\n", fsrp->fr_vfscnt);
5116 + ADDBUF("fr_unmount=%d\n", fsrp->fr_unmount);
5117 +
5118 + len += sprintf(buffer + len, "fr_rattr=");
5119 + CHKFULL;
5120 + for(i = 0; i <= DM_ATTR_NAME_SIZE; ++i){
5121 + ADDBUF("%c", fsrp->fr_rattr.an_chars[i]);
5122 + }
5123 + CHKFULL;
5124 + len += sprintf(buffer + len, "\n");
5125 + CHKFULL;
5126 +
5127 + for(i = 0; i < DM_EVENT_MAX; i++){
5128 + if( fsrp->fr_sessp[i] != NULL ){
5129 + ADDBUF("fr_sessp[%d]=", i);
5130 + ADDBUF("0x%p\n", fsrp->fr_sessp[i]);
5131 + }
5132 + }
5133 + CHKFULL;
5134 +
5135 + break;
5136 + }
5137 +
5138 + if (offset >= len) {
5139 + *start = buffer;
5140 + *eof = 1;
5141 + return 0;
5142 + }
5143 + *start = buffer + offset;
5144 + if ((len -= offset) > count)
5145 + return count;
5146 + *eof = 1;
5147 +
5148 + return len;
5149 +}
5150 +#endif
5151 +
5152 +
5153 +/* Returns a pointer to the filesystem structure for the filesystem
5154 + referenced by fsidp. The caller is responsible for obtaining dm_reg_lock
5155 + before calling this routine.
5156 +*/
5157 +
5158 +static dm_fsreg_t *
5159 +dm_find_fsreg(
5160 + dm_fsid_t *fsidp)
5161 +{
5162 + dm_fsreg_t *fsrp;
5163 +
5164 + for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
5165 + if (!memcmp(&fsrp->fr_fsid, fsidp, sizeof(*fsidp)))
5166 + break;
5167 + }
5168 + return(fsrp);
5169 +}
5170 +
5171 +
5172 +/* Given a fsid_t, dm_find_fsreg_and_lock() finds the dm_fsreg_t structure
5173 + for that filesytem if one exists, and returns a pointer to the structure
5174 + after obtaining its 'fr_lock' so that the caller can safely modify the
5175 + dm_fsreg_t. The caller is responsible for releasing 'fr_lock'.
5176 +*/
5177 +
5178 +static dm_fsreg_t *
5179 +dm_find_fsreg_and_lock(
5180 + dm_fsid_t *fsidp,
5181 + unsigned long *lcp) /* address of returned lock cookie */
5182 +{
5183 + dm_fsreg_t *fsrp;
5184 +
5185 + for (;;) {
5186 + *lcp = mutex_spinlock(&dm_reg_lock);
5187 +
5188 + if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
5189 + mutex_spinunlock(&dm_reg_lock, *lcp);
5190 + return(NULL);
5191 + }
5192 + if (spin_trylock(&fsrp->fr_lock)) {
5193 + nested_spinunlock(&dm_reg_lock);
5194 + return(fsrp); /* success */
5195 + }
5196 +
5197 + /* If the second lock is not available, drop the first and
5198 + start over. This gives the CPU a chance to process any
5199 + interrupts, and also allows processes which want a fr_lock
5200 + for a different filesystem to proceed.
5201 + */
5202 +
5203 + mutex_spinunlock(&dm_reg_lock, *lcp);
5204 + }
5205 +}
5206 +
5207 +
5208 +/* dm_add_fsys_entry() is called when a DM_EVENT_MOUNT event is about to be
5209 + sent. It creates a dm_fsreg_t structure for the filesystem and stores a
5210 + pointer to a copy of the mount event within that structure so that it is
5211 + available for subsequent dm_get_mountinfo() calls.
5212 +*/
5213 +
5214 +int
5215 +dm_add_fsys_entry(
5216 + struct super_block *sb,
5217 + dm_tokevent_t *tevp)
5218 +{
5219 + dm_fsreg_t *fsrp;
5220 + int msgsize;
5221 + void *msg;
5222 + unsigned long lc; /* lock cookie */
5223 + dm_fsid_t fsid;
5224 + struct filesystem_dmapi_operations *dops;
5225 +
5226 + dops = dm_fsys_ops(sb);
5227 + ASSERT(dops);
5228 + dops->get_fsid(sb, &fsid);
5229 +
5230 + /* Allocate and initialize a dm_fsreg_t structure for the filesystem. */
5231 +
5232 + msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event);
5233 + msg = kmalloc(msgsize, GFP_KERNEL);
5234 + if (msg == NULL) {
5235 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
5236 + return -ENOMEM;
5237 + }
5238 + memcpy(msg, &tevp->te_event, msgsize);
5239 +
5240 + fsrp = kmem_cache_alloc(dm_fsreg_cachep, GFP_KERNEL);
5241 + if (fsrp == NULL) {
5242 + kfree(msg);
5243 + printk("%s/%d: kmem_cache_alloc(dm_fsreg_cachep) returned NULL\n", __FUNCTION__, __LINE__);
5244 + return -ENOMEM;
5245 + }
5246 + memset(fsrp, 0, sizeof(*fsrp));
5247 +
5248 + fsrp->fr_sb = sb;
5249 + fsrp->fr_tevp = tevp;
5250 + memcpy(&fsrp->fr_fsid, &fsid, sizeof(fsid));
5251 + fsrp->fr_msg = msg;
5252 + fsrp->fr_msgsize = msgsize;
5253 + fsrp->fr_state = DM_STATE_MOUNTING;
5254 + sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq");
5255 + sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue");
5256 + spinlock_init(&fsrp->fr_lock, "fr_lock");
5257 +
5258 + /* If no other mounted DMAPI filesystem already has this same
5259 + fsid_t, then add this filesystem to the list.
5260 + */
5261 +
5262 + lc = mutex_spinlock(&dm_reg_lock);
5263 +
5264 + if (!dm_find_fsreg(&fsid)) {
5265 + fsrp->fr_next = dm_registers;
5266 + dm_registers = fsrp;
5267 + dm_fsys_cnt++;
5268 + mutex_spinunlock(&dm_reg_lock, lc);
5269 +#ifdef CONFIG_PROC_FS
5270 + {
5271 + char buf[100];
5272 + struct proc_dir_entry *entry;
5273 +
5274 + sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
5275 + entry = create_proc_read_entry(buf, 0, NULL, fsreg_read_pfs, fsrp);
5276 + entry->owner = THIS_MODULE;
5277 + }
5278 +#endif
5279 + return(0);
5280 + }
5281 +
5282 + /* A fsid_t collision occurred, so prevent this new filesystem from
5283 + mounting.
5284 + */
5285 +
5286 + mutex_spinunlock(&dm_reg_lock, lc);
5287 +
5288 + sv_destroy(&fsrp->fr_dispq);
5289 + sv_destroy(&fsrp->fr_queue);
5290 + spinlock_destroy(&fsrp->fr_lock);
5291 + kfree(msg);
5292 + kmem_cache_free(dm_fsreg_cachep, fsrp);
5293 + return(-EBUSY);
5294 +}
5295 +
5296 +
5297 +/* dm_change_fsys_entry() is called whenever a filesystem's mount state is
5298 + about to change. The state is changed to DM_STATE_MOUNTED after a
5299 + successful DM_EVENT_MOUNT event or after a failed unmount. It is changed
5300 + to DM_STATE_UNMOUNTING after a successful DM_EVENT_PREUNMOUNT event.
5301 + Finally, the state is changed to DM_STATE_UNMOUNTED after a successful
5302 + unmount. It stays in this state until the DM_EVENT_UNMOUNT event is
5303 + queued, at which point the filesystem entry is removed.
5304 +*/
5305 +
5306 +void
5307 +dm_change_fsys_entry(
5308 + struct super_block *sb,
5309 + dm_fsstate_t newstate)
5310 +{
5311 + dm_fsreg_t *fsrp;
5312 + int seq_error;
5313 + unsigned long lc; /* lock cookie */
5314 + dm_fsid_t fsid;
5315 + struct filesystem_dmapi_operations *dops;
5316 +
5317 + /* Find the filesystem referenced by the sb's fsid_t. This should
5318 + always succeed.
5319 + */
5320 +
5321 + dops = dm_fsys_ops(sb);
5322 + ASSERT(dops);
5323 + dops->get_fsid(sb, &fsid);
5324 +
5325 + if ((fsrp = dm_find_fsreg_and_lock(&fsid, &lc)) == NULL) {
5326 + panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
5327 + "sb %p\n", sb);
5328 + }
5329 +
5330 + /* Make sure that the new state is acceptable given the current state
5331 + of the filesystem. Any error here is a major DMAPI/filesystem
5332 + screwup.
5333 + */
5334 +
5335 + seq_error = 0;
5336 + switch (newstate) {
5337 + case DM_STATE_MOUNTED:
5338 + if (fsrp->fr_state != DM_STATE_MOUNTING &&
5339 + fsrp->fr_state != DM_STATE_UNMOUNTING) {
5340 + seq_error++;
5341 + }
5342 + break;
5343 + case DM_STATE_UNMOUNTING:
5344 + if (fsrp->fr_state != DM_STATE_MOUNTED)
5345 + seq_error++;
5346 + break;
5347 + case DM_STATE_UNMOUNTED:
5348 + if (fsrp->fr_state != DM_STATE_UNMOUNTING)
5349 + seq_error++;
5350 + break;
5351 + default:
5352 + seq_error++;
5353 + break;
5354 + }
5355 + if (seq_error) {
5356 + panic("dm_change_fsys_entry: DMAPI sequence error: old state "
5357 + "%d, new state %d, fsrp %p\n", fsrp->fr_state,
5358 + newstate, fsrp);
5359 + }
5360 +
5361 + /* If the old state was DM_STATE_UNMOUNTING, then processes could be
5362 + sleeping in dm_handle_to_ip() waiting for their DM_NO_TOKEN handles
5363 + to be translated to inodes. Wake them up so that they either
5364 + continue (new state is DM_STATE_MOUNTED) or fail (new state is
5365 + DM_STATE_UNMOUNTED).
5366 + */
5367 +
5368 + if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
5369 + if (fsrp->fr_hdlcnt)
5370 + sv_broadcast(&fsrp->fr_queue);
5371 + }
5372 +
5373 + /* Change the filesystem's mount state to its new value. */
5374 +
5375 + fsrp->fr_state = newstate;
5376 + fsrp->fr_tevp = NULL; /* not valid after DM_STATE_MOUNTING */
5377 +
5378 + /* If the new state is DM_STATE_UNMOUNTING, wait until any application
5379 + threads currently in the process of making VFS_VGET and VFS_ROOT
5380 + calls are done before we let this unmount thread continue the
5381 + unmount. (We want to make sure that the unmount will see these
5382 + inode references during its scan.)
5383 + */
5384 +
5385 + if (newstate == DM_STATE_UNMOUNTING) {
5386 + while (fsrp->fr_vfscnt) {
5387 + fsrp->fr_unmount++;
5388 + sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
5389 + lc = mutex_spinlock(&fsrp->fr_lock);
5390 + fsrp->fr_unmount--;
5391 + }
5392 + }
5393 +
5394 + mutex_spinunlock(&fsrp->fr_lock, lc);
5395 +}
5396 +
5397 +
5398 +/* dm_remove_fsys_entry() gets called after a failed mount or after an
5399 + DM_EVENT_UNMOUNT event has been queued. (The filesystem entry must stay
5400 + until the DM_EVENT_UNMOUNT reply is queued so that the event can use the
5401 + 'fr_sessp' list to see which session to send the event to.)
5402 +*/
5403 +
5404 +void
5405 +dm_remove_fsys_entry(
5406 + struct super_block *sb)
5407 +{
5408 + dm_fsreg_t **fsrpp;
5409 + dm_fsreg_t *fsrp;
5410 + unsigned long lc; /* lock cookie */
5411 + struct filesystem_dmapi_operations *dops;
5412 + dm_fsid_t fsid;
5413 +
5414 + dops = dm_fsys_ops(sb);
5415 + ASSERT(dops);
5416 + dops->get_fsid(sb, &fsid);
5417 +
5418 + /* Find the filesystem referenced by the sb's fsid_t and dequeue
5419 + it after verifying that the fr_state shows a filesystem that is
5420 + either mounting or unmounted.
5421 + */
5422 +
5423 + lc = mutex_spinlock(&dm_reg_lock);
5424 +
5425 + fsrpp = &dm_registers;
5426 + while ((fsrp = *fsrpp) != NULL) {
5427 + if (!memcmp(&fsrp->fr_fsid, &fsid, sizeof(fsrp->fr_fsid)))
5428 + break;
5429 + fsrpp = &fsrp->fr_next;
5430 + }
5431 + if (fsrp == NULL) {
5432 + mutex_spinunlock(&dm_reg_lock, lc);
5433 + panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
5434 + "sb %p\n", sb);
5435 + }
5436 +
5437 + nested_spinlock(&fsrp->fr_lock);
5438 +
5439 + /* Verify that it makes sense to remove this entry. */
5440 +
5441 + if (fsrp->fr_state != DM_STATE_MOUNTING &&
5442 + fsrp->fr_state != DM_STATE_UNMOUNTED) {
5443 + nested_spinunlock(&fsrp->fr_lock);
5444 + mutex_spinunlock(&dm_reg_lock, lc);
5445 + panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
5446 + "%d, fsrp %p\n", fsrp->fr_state, fsrp);
5447 + }
5448 +
5449 + *fsrpp = fsrp->fr_next;
5450 + dm_fsys_cnt--;
5451 +
5452 + nested_spinunlock(&dm_reg_lock);
5453 +
5454 + /* Since the filesystem is about to finish unmounting, we must be sure
5455 + that no inodes are being referenced within the filesystem before we
5456 + let this event thread continue. If the filesystem is currently in
5457 + state DM_STATE_MOUNTING, then we know by definition that there can't
5458 + be any references. If the filesystem is DM_STATE_UNMOUNTED, then
5459 + any application threads referencing handles with DM_NO_TOKEN should
5460 + have already been awakened by dm_change_fsys_entry and should be
5461 + long gone by now. Just in case they haven't yet left, sleep here
5462 + until they are really gone.
5463 + */
5464 +
5465 + while (fsrp->fr_hdlcnt) {
5466 + fsrp->fr_unmount++;
5467 + sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
5468 + lc = mutex_spinlock(&fsrp->fr_lock);
5469 + fsrp->fr_unmount--;
5470 + }
5471 + mutex_spinunlock(&fsrp->fr_lock, lc);
5472 +
5473 + /* Release all memory. */
5474 +
5475 +#ifdef CONFIG_PROC_FS
5476 + {
5477 + char buf[100];
5478 + sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
5479 + remove_proc_entry(buf, NULL);
5480 + }
5481 +#endif
5482 + dm_fsys_ops_release(sb);
5483 + sv_destroy(&fsrp->fr_dispq);
5484 + sv_destroy(&fsrp->fr_queue);
5485 + spinlock_destroy(&fsrp->fr_lock);
5486 + kfree(fsrp->fr_msg);
5487 + kmem_cache_free(dm_fsreg_cachep, fsrp);
5488 +}
5489 +
5490 +
5491 +/* Get an inode for the object referenced by handlep. We cannot use
5492 + altgetvfs() because it fails if the VFS_OFFLINE bit is set, which means
5493 + that any call to dm_handle_to_ip() while a umount is in progress would
5494 + return an error, even if the umount can't possibly succeed because users
5495 + are in the filesystem. The requests would start to fail as soon as the
5496 + umount begins, even before the application receives the DM_EVENT_PREUNMOUNT
5497 + event.
5498 +
5499 + dm_handle_to_ip() emulates the behavior of lookup() while an unmount is
5500 + in progress. Any call to dm_handle_to_ip() while the filesystem is in the
5501 + DM_STATE_UNMOUNTING state will block. If the unmount eventually succeeds,
5502 + the requests will wake up and fail. If the unmount fails, the requests will
5503 + wake up and complete normally.
5504 +
5505 + While a filesystem is in state DM_STATE_MOUNTING, dm_handle_to_ip() will
5506 + fail all requests. Per the DMAPI spec, the only handles in the filesystem
5507 + which are valid during a mount event are the handles within the event
5508 + itself.
5509 +*/
5510 +
5511 +struct inode *
5512 +dm_handle_to_ip(
5513 + dm_handle_t *handlep,
5514 + short *typep)
5515 +{
5516 + dm_fsreg_t *fsrp;
5517 + short type;
5518 + unsigned long lc; /* lock cookie */
5519 + int error = 0;
5520 + dm_fid_t *fidp;
5521 + struct super_block *sb;
5522 + struct inode *ip;
5523 + int filetype;
5524 + struct filesystem_dmapi_operations *dmapiops;
5525 +
5526 + if ((fsrp = dm_find_fsreg_and_lock(&handlep->ha_fsid, &lc)) == NULL)
5527 + return NULL;
5528 +
5529 + fidp = (dm_fid_t*)&handlep->ha_fid;
5530 + /* If mounting, and we are not asking for a filesystem handle,
5531 + * then fail the request. (dm_fid_len==0 for fshandle)
5532 + */
5533 + if ((fsrp->fr_state == DM_STATE_MOUNTING) &&
5534 + (fidp->dm_fid_len != 0)) {
5535 + mutex_spinunlock(&fsrp->fr_lock, lc);
5536 + return NULL;
5537 + }
5538 +
5539 + for (;;) {
5540 + if (fsrp->fr_state == DM_STATE_MOUNTING)
5541 + break;
5542 + if (fsrp->fr_state == DM_STATE_MOUNTED)
5543 + break;
5544 + if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
5545 + if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
5546 + sv_broadcast(&fsrp->fr_queue);
5547 + mutex_spinunlock(&fsrp->fr_lock, lc);
5548 + return NULL;
5549 + }
5550 +
5551 + /* Must be DM_STATE_UNMOUNTING. */
5552 +
5553 + fsrp->fr_hdlcnt++;
5554 + sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
5555 + lc = mutex_spinlock(&fsrp->fr_lock);
5556 + fsrp->fr_hdlcnt--;
5557 + }
5558 +
5559 + fsrp->fr_vfscnt++;
5560 + mutex_spinunlock(&fsrp->fr_lock, lc);
5561 +
5562 + /* Now that the mutex is released, wait until we have access to the
5563 + inode.
5564 + */
5565 +
5566 + sb = fsrp->fr_sb;
5567 + error = -ENOSYS;
5568 + dmapiops = dm_fsys_ops(sb);
5569 + ASSERT(dmapiops);
5570 + if (dmapiops->fh_to_inode)
5571 + error = dmapiops->fh_to_inode(sb, &ip, (void*)fidp);
5572 +
5573 + lc = mutex_spinlock(&fsrp->fr_lock);
5574 +
5575 + fsrp->fr_vfscnt--;
5576 + if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
5577 + sv_broadcast(&fsrp->fr_queue);
5578 +
5579 + mutex_spinunlock(&fsrp->fr_lock, lc);
5580 + if (error || ip == NULL)
5581 + return NULL;
5582 +
5583 + filetype = ip->i_mode & S_IFMT;
5584 + if (fidp->dm_fid_len == 0) {
5585 + type = DM_TDT_VFS;
5586 + } else if (filetype == S_IFREG) {
5587 + type = DM_TDT_REG;
5588 + } else if (filetype == S_IFDIR) {
5589 + type = DM_TDT_DIR;
5590 + } else if (filetype == S_IFLNK) {
5591 + type = DM_TDT_LNK;
5592 + } else {
5593 + type = DM_TDT_OTH;
5594 + }
5595 + *typep = type;
5596 + return ip;
5597 +}
5598 +
5599 +
5600 +int
5601 +dm_ip_to_handle(
5602 + struct inode *ip,
5603 + dm_handle_t *handlep)
5604 +{
5605 + int error;
5606 + dm_fid_t fid;
5607 + dm_fsid_t fsid;
5608 + int hsize;
5609 + struct filesystem_dmapi_operations *dops;
5610 +
5611 + dops = dm_fsys_ops(ip->i_sb);
5612 + ASSERT(dops);
5613 +
5614 + error = dops->inode_to_fh(ip, &fid, &fsid);
5615 + if (error)
5616 + return error;
5617 +
5618 + memcpy(&handlep->ha_fsid, &fsid, sizeof(fsid));
5619 + memcpy(&handlep->ha_fid, &fid, fid.dm_fid_len + sizeof fid.dm_fid_len);
5620 + hsize = DM_HSIZE(*handlep);
5621 + memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize);
5622 + return 0;
5623 +}
5624 +
5625 +
5626 +/* Given an inode, check if that inode resides in filesystem that supports
5627 + DMAPI. Returns zero if the inode is in a DMAPI filesystem, otherwise
5628 + returns an errno.
5629 +*/
5630 +
5631 +int
5632 +dm_check_dmapi_ip(
5633 + struct inode *ip)
5634 +{
5635 + dm_handle_t handle;
5636 + /* REFERENCED */
5637 + dm_fsreg_t *fsrp;
5638 + int error;
5639 + unsigned long lc; /* lock cookie */
5640 +
5641 + if ((error = dm_ip_to_handle(ip, &handle)) != 0)
5642 + return(error);
5643 +
5644 + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
5645 + return(-EBADF);
5646 + mutex_spinunlock(&fsrp->fr_lock, lc);
5647 + return(0);
5648 +}
5649 +
5650 +
5651 +/* Return a pointer to the DM_EVENT_MOUNT event while a mount is still in
5652 + progress. This is only called by dm_get_config and dm_get_config_events
5653 + which need to access the filesystem during a mount but which don't have
5654 + a session and token to use.
5655 +*/
5656 +
5657 +dm_tokevent_t *
5658 +dm_find_mount_tevp_and_lock(
5659 + dm_fsid_t *fsidp,
5660 + unsigned long *lcp) /* address of returned lock cookie */
5661 +{
5662 + dm_fsreg_t *fsrp;
5663 +
5664 + if ((fsrp = dm_find_fsreg_and_lock(fsidp, lcp)) == NULL)
5665 + return(NULL);
5666 +
5667 + if (!fsrp->fr_tevp || fsrp->fr_state != DM_STATE_MOUNTING) {
5668 + mutex_spinunlock(&fsrp->fr_lock, *lcp);
5669 + return(NULL);
5670 + }
5671 + nested_spinlock(&fsrp->fr_tevp->te_lock);
5672 + nested_spinunlock(&fsrp->fr_lock);
5673 + return(fsrp->fr_tevp);
5674 +}
5675 +
5676 +
5677 +/* Wait interruptibly until a session registers disposition for 'event' in
5678 + filesystem 'sb'. Upon successful exit, both the filesystem's dm_fsreg_t
5679 + structure and the session's dm_session_t structure are locked. The caller
5680 + is responsible for unlocking both structures using the returned cookies.
5681 +
5682 + Warning: The locks can be dropped in any order, but the 'lc2p' cookie MUST
5683 + BE USED FOR THE FIRST UNLOCK, and the lc1p cookie must be used for the
5684 + second unlock. If this is not done, the CPU will be interruptible while
5685 + holding a mutex, which could deadlock the machine!
5686 +*/
5687 +
5688 +static int
5689 +dm_waitfor_disp(
5690 + struct super_block *sb,
5691 + dm_tokevent_t *tevp,
5692 + dm_fsreg_t **fsrpp,
5693 + unsigned long *lc1p, /* addr of first returned lock cookie */
5694 + dm_session_t **sessionpp,
5695 + unsigned long *lc2p) /* addr of 2nd returned lock cookie */
5696 +{
5697 + dm_eventtype_t event = tevp->te_msg.ev_type;
5698 + dm_session_t *s;
5699 + dm_fsreg_t *fsrp;
5700 + dm_fsid_t fsid;
5701 + struct filesystem_dmapi_operations *dops;
5702 +
5703 + dops = dm_fsys_ops(sb);
5704 + ASSERT(dops);
5705 +
5706 + dops->get_fsid(sb, &fsid);
5707 + if ((fsrp = dm_find_fsreg_and_lock(&fsid, lc1p)) == NULL)
5708 + return -ENOENT;
5709 +
5710 + /* If no session is registered for this event in the specified
5711 + filesystem, then sleep interruptibly until one does.
5712 + */
5713 +
5714 + for (;;) {
5715 + int rc = 0;
5716 +
5717 + /* The dm_find_session_and_lock() call is needed because a
5718 + session that is in the process of being removed might still
5719 + be in the dm_fsreg_t structure but won't be in the
5720 + dm_sessions list.
5721 + */
5722 +
5723 + if ((s = fsrp->fr_sessp[event]) != NULL &&
5724 + dm_find_session_and_lock(s->sn_sessid, &s, lc2p) == 0) {
5725 + break;
5726 + }
5727 +
5728 + /* Noone is currently registered. DM_EVENT_UNMOUNT events
5729 + don't wait for anyone to register because the unmount is
5730 + already past the point of no return.
5731 + */
5732 +
5733 + if (event == DM_EVENT_UNMOUNT) {
5734 + mutex_spinunlock(&fsrp->fr_lock, *lc1p);
5735 + return -ENOENT;
5736 + }
5737 +
5738 + /* Wait until a session registers for disposition of this
5739 + event.
5740 + */
5741 +
5742 + fsrp->fr_dispcnt++;
5743 + dm_link_event(tevp, &fsrp->fr_evt_dispq);
5744 +
5745 + sv_wait_sig(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lc1p);
5746 + rc = signal_pending(current);
5747 +
5748 + *lc1p = mutex_spinlock(&fsrp->fr_lock);
5749 + fsrp->fr_dispcnt--;
5750 + dm_unlink_event(tevp, &fsrp->fr_evt_dispq);
5751 +#ifdef HAVE_DM_QUEUE_FLUSH
5752 + if (tevp->te_flags & DM_TEF_FLUSH) {
5753 + mutex_spinunlock(&fsrp->fr_lock, *lc1p);
5754 + return tevp->te_reply;
5755 + }
5756 +#endif /* HAVE_DM_QUEUE_FLUSH */
5757 + if (rc) { /* if signal was received */
5758 + mutex_spinunlock(&fsrp->fr_lock, *lc1p);
5759 + return -EINTR;
5760 + }
5761 + }
5762 + *sessionpp = s;
5763 + *fsrpp = fsrp;
5764 + return 0;
5765 +}
5766 +
5767 +
5768 +/* Returns the session pointer for the session registered for an event
5769 + in the given sb. If successful, the session is locked upon return. The
5770 + caller is responsible for releasing the lock. If no session is currently
5771 + registered for the event, dm_waitfor_disp_session() will sleep interruptibly
5772 + until a registration occurs.
5773 +*/
5774 +
5775 +int
5776 +dm_waitfor_disp_session(
5777 + struct super_block *sb,
5778 + dm_tokevent_t *tevp,
5779 + dm_session_t **sessionpp,
5780 + unsigned long *lcp)
5781 +{
5782 + dm_fsreg_t *fsrp;
5783 + unsigned long lc2;
5784 + int error;
5785 +
5786 + if (tevp->te_msg.ev_type < 0 || tevp->te_msg.ev_type > DM_EVENT_MAX)
5787 + return(-EIO);
5788 +
5789 + error = dm_waitfor_disp(sb, tevp, &fsrp, lcp, sessionpp, &lc2);
5790 + if (!error)
5791 + mutex_spinunlock(&fsrp->fr_lock, lc2); /* rev. cookie order*/
5792 + return(error);
5793 +}
5794 +
5795 +
5796 +/* Find the session registered for the DM_EVENT_DESTROY event on the specified
5797 + filesystem, sleeping if necessary until registration occurs. Once found,
5798 + copy the session's return-on-destroy attribute name, if any, back to the
5799 + caller.
5800 +*/
5801 +
5802 +int
5803 +dm_waitfor_destroy_attrname(
5804 + struct super_block *sbp,
5805 + dm_attrname_t *attrnamep)
5806 +{
5807 + dm_tokevent_t *tevp;
5808 + dm_session_t *s;
5809 + dm_fsreg_t *fsrp;
5810 + int error;
5811 + unsigned long lc1; /* first lock cookie */
5812 + unsigned long lc2; /* second lock cookie */
5813 + void *msgp;
5814 +
5815 + tevp = dm_evt_create_tevp(DM_EVENT_DESTROY, 1, (void**)&msgp);
5816 + error = dm_waitfor_disp(sbp, tevp, &fsrp, &lc1, &s, &lc2);
5817 + if (!error) {
5818 + *attrnamep = fsrp->fr_rattr; /* attribute or zeros */
5819 + mutex_spinunlock(&s->sn_qlock, lc2); /* rev. cookie order */
5820 + mutex_spinunlock(&fsrp->fr_lock, lc1);
5821 + }
5822 + dm_evt_rele_tevp(tevp,0);
5823 + return(error);
5824 +}
5825 +
5826 +
5827 +/* Unregisters the session for the disposition of all events on all
5828 + filesystems. This routine is not called until the session has been
5829 + dequeued from the session list and its session lock has been dropped,
5830 + but before the actual structure is freed, so it is safe to grab the
5831 + 'dm_reg_lock' here. If dm_waitfor_disp_session() happens to be called
5832 + by another thread, it won't find this session on the session list and
5833 + will wait until a new session registers.
5834 +*/
5835 +
5836 +void
5837 +dm_clear_fsreg(
5838 + dm_session_t *s)
5839 +{
5840 + dm_fsreg_t *fsrp;
5841 + int event;
5842 + unsigned long lc; /* lock cookie */
5843 +
5844 + lc = mutex_spinlock(&dm_reg_lock);
5845 +
5846 + for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) {
5847 + nested_spinlock(&fsrp->fr_lock);
5848 + for (event = 0; event < DM_EVENT_MAX; event++) {
5849 + if (fsrp->fr_sessp[event] != s)
5850 + continue;
5851 + fsrp->fr_sessp[event] = NULL;
5852 + if (event == DM_EVENT_DESTROY)
5853 + memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
5854 + }
5855 + nested_spinunlock(&fsrp->fr_lock);
5856 + }
5857 +
5858 + mutex_spinunlock(&dm_reg_lock, lc);
5859 +}
5860 +
5861 +
5862 +/*
5863 + * Return the handle for the object named by path.
5864 + */
5865 +
5866 +int
5867 +dm_path_to_hdl(
5868 + char __user *path, /* any path name */
5869 + void __user *hanp, /* user's data buffer */
5870 + size_t __user *hlenp) /* set to size of data copied */
5871 +{
5872 + /* REFERENCED */
5873 + dm_fsreg_t *fsrp;
5874 + dm_handle_t handle;
5875 + size_t hlen;
5876 + int error;
5877 + unsigned long lc; /* lock cookie */
5878 + struct nameidata nd;
5879 + struct inode *inode;
5880 + size_t len;
5881 + char *name;
5882 + struct filesystem_dmapi_operations *dops;
5883 +
5884 + /* XXX get things straightened out so getname() works here? */
5885 + if (!(len = strnlen_user(path, PATH_MAX)))
5886 + return(-EFAULT);
5887 + if (len == 1)
5888 + return(-ENOENT);
5889 + if (len > PATH_MAX)
5890 + return(-ENAMETOOLONG);
5891 + name = kmalloc(len, GFP_KERNEL);
5892 + if (name == NULL) {
5893 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
5894 + return(-ENOMEM);
5895 + }
5896 + if (copy_from_user(name, path, len)) {
5897 + kfree(name);
5898 + return(-EFAULT);
5899 + }
5900 +
5901 + error = path_lookup(name, LOOKUP_POSITIVE, &nd);
5902 + kfree(name);
5903 + if (error)
5904 + return error;
5905 +
5906 + ASSERT(nd.path.dentry);
5907 + ASSERT(nd.path.dentry->d_inode);
5908 + inode = igrab(nd.path.dentry->d_inode);
5909 + path_put(&nd.path);
5910 +
5911 + dops = dm_fsys_ops(inode->i_sb);
5912 + if (dops == NULL) {
5913 + /* No longer in a dmapi-capable filesystem...Toto */
5914 + iput(inode);
5915 + return -EINVAL;
5916 + }
5917 +
5918 + /* we need the inode */
5919 + error = dm_ip_to_handle(inode, &handle);
5920 + iput(inode);
5921 + if (error)
5922 + return(error);
5923 +
5924 + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
5925 + return(-EBADF);
5926 + mutex_spinunlock(&fsrp->fr_lock, lc);
5927 +
5928 + hlen = DM_HSIZE(handle);
5929 +
5930 + if (copy_to_user(hanp, &handle, (int)hlen))
5931 + return(-EFAULT);
5932 + if (put_user(hlen,hlenp))
5933 + return(-EFAULT);
5934 + return 0;
5935 +}
5936 +
5937 +
5938 +/*
5939 + * Return the handle for the file system containing the object named by path.
5940 + */
5941 +
5942 +int
5943 +dm_path_to_fshdl(
5944 + char __user *path, /* any path name */
5945 + void __user *hanp, /* user's data buffer */
5946 + size_t __user *hlenp) /* set to size of data copied */
5947 +{
5948 + /* REFERENCED */
5949 + dm_fsreg_t *fsrp;
5950 + dm_handle_t handle;
5951 + size_t hlen;
5952 + int error;
5953 + unsigned long lc; /* lock cookie */
5954 + struct nameidata nd;
5955 + struct inode *inode;
5956 + size_t len;
5957 + char *name;
5958 + struct filesystem_dmapi_operations *dops;
5959 +
5960 + /* XXX get things straightened out so getname() works here? */
5961 + if(!(len = strnlen_user(path, PATH_MAX)))
5962 + return(-EFAULT);
5963 + if (len == 1)
5964 + return(-ENOENT);
5965 + if (len > PATH_MAX)
5966 + return(-ENAMETOOLONG);
5967 + name = kmalloc(len, GFP_KERNEL);
5968 + if (name == NULL) {
5969 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
5970 + return(-ENOMEM);
5971 + }
5972 + if (copy_from_user(name, path, len)) {
5973 + kfree(name);
5974 + return(-EFAULT);
5975 + }
5976 +
5977 + error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd);
5978 + kfree(name);
5979 + if (error)
5980 + return error;
5981 +
5982 + ASSERT(nd.path.dentry);
5983 + ASSERT(nd.path.dentry->d_inode);
5984 +
5985 + inode = igrab(nd.path.dentry->d_inode);
5986 + path_put(&nd.path);
5987 +
5988 + dops = dm_fsys_ops(inode->i_sb);
5989 + if (dops == NULL) {
5990 + /* No longer in a dmapi-capable filesystem...Toto */
5991 + iput(inode);
5992 + return -EINVAL;
5993 + }
5994 +
5995 + error = dm_ip_to_handle(inode, &handle);
5996 + iput(inode);
5997 +
5998 + if (error)
5999 + return(error);
6000 +
6001 + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
6002 + return(-EBADF);
6003 + mutex_spinunlock(&fsrp->fr_lock, lc);
6004 +
6005 + hlen = DM_FSHSIZE;
6006 + if(copy_to_user(hanp, &handle, (int)hlen))
6007 + return(-EFAULT);
6008 + if(put_user(hlen,hlenp))
6009 + return(-EFAULT);
6010 + return 0;
6011 +}
6012 +
6013 +
6014 +int
6015 +dm_fd_to_hdl(
6016 + int fd, /* any file descriptor */
6017 + void __user *hanp, /* user's data buffer */
6018 + size_t __user *hlenp) /* set to size of data copied */
6019 +{
6020 + /* REFERENCED */
6021 + dm_fsreg_t *fsrp;
6022 + dm_handle_t handle;
6023 + size_t hlen;
6024 + int error;
6025 + unsigned long lc; /* lock cookie */
6026 + struct file *filep = fget(fd);
6027 + struct inode *ip = filep->f_dentry->d_inode;
6028 +
6029 + if (!filep)
6030 + return(-EBADF);
6031 + if ((error = dm_ip_to_handle(ip, &handle)) != 0)
6032 + return(error);
6033 +
6034 + if ((fsrp = dm_find_fsreg_and_lock(&handle.ha_fsid, &lc)) == NULL)
6035 + return(-EBADF);
6036 + mutex_spinunlock(&fsrp->fr_lock, lc);
6037 +
6038 + hlen = DM_HSIZE(handle);
6039 + if (copy_to_user(hanp, &handle, (int)hlen))
6040 + return(-EFAULT);
6041 + fput(filep);
6042 + if(put_user(hlen, hlenp))
6043 + return(-EFAULT);
6044 + return 0;
6045 +}
6046 +
6047 +
6048 +/* Enable events on an object. */
6049 +
6050 +int
6051 +dm_set_eventlist(
6052 + dm_sessid_t sid,
6053 + void __user *hanp,
6054 + size_t hlen,
6055 + dm_token_t token,
6056 + dm_eventset_t __user *eventsetp,
6057 + u_int maxevent)
6058 +{
6059 + dm_fsys_vector_t *fsys_vector;
6060 + dm_eventset_t eventset;
6061 + dm_tokdata_t *tdp;
6062 + int error;
6063 +
6064 + if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
6065 + return(-EFAULT);
6066 +
6067 + /* Do some minor sanity checking. */
6068 +
6069 + if (maxevent == 0 || maxevent > DM_EVENT_MAX)
6070 + return(-EINVAL);
6071 +
6072 + /* Access the specified object. */
6073 +
6074 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY,
6075 + DM_RIGHT_EXCL, &tdp);
6076 + if (error != 0)
6077 + return(error);
6078 +
6079 + fsys_vector = dm_fsys_vector(tdp->td_ip);
6080 + error = fsys_vector->set_eventlist(tdp->td_ip, tdp->td_right,
6081 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0),
6082 + &eventset, maxevent);
6083 +
6084 + dm_app_put_tdp(tdp);
6085 + return(error);
6086 +}
6087 +
6088 +
6089 +/* Return the list of enabled events for an object. */
6090 +
6091 +int
6092 +dm_get_eventlist(
6093 + dm_sessid_t sid,
6094 + void __user *hanp,
6095 + size_t hlen,
6096 + dm_token_t token,
6097 + u_int nelem,
6098 + dm_eventset_t __user *eventsetp,
6099 + u_int __user *nelemp)
6100 +{
6101 + dm_fsys_vector_t *fsys_vector;
6102 + dm_tokdata_t *tdp;
6103 + dm_eventset_t eventset;
6104 + u_int elem;
6105 + int error;
6106 +
6107 + if (nelem == 0)
6108 + return(-EINVAL);
6109 +
6110 + /* Access the specified object. */
6111 +
6112 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY,
6113 + DM_RIGHT_SHARED, &tdp);
6114 + if (error != 0)
6115 + return(error);
6116 +
6117 + /* Get the object's event list. */
6118 +
6119 + fsys_vector = dm_fsys_vector(tdp->td_ip);
6120 + error = fsys_vector->get_eventlist(tdp->td_ip, tdp->td_right,
6121 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0),
6122 + nelem, &eventset, &elem);
6123 +
6124 + dm_app_put_tdp(tdp);
6125 +
6126 + if (error)
6127 + return(error);
6128 +
6129 + if (copy_to_user(eventsetp, &eventset, sizeof(eventset)))
6130 + return(-EFAULT);
6131 + if (put_user(nelem, nelemp))
6132 + return(-EFAULT);
6133 + return(0);
6134 +}
6135 +
6136 +
6137 +/* Register for disposition of events. The handle must either be the
6138 + global handle or must be the handle of a file system. The list of events
6139 + is pointed to by eventsetp.
6140 +*/
6141 +
6142 +int
6143 +dm_set_disp(
6144 + dm_sessid_t sid,
6145 + void __user *hanp,
6146 + size_t hlen,
6147 + dm_token_t token,
6148 + dm_eventset_t __user *eventsetp,
6149 + u_int maxevent)
6150 +{
6151 + dm_session_t *s;
6152 + dm_fsreg_t *fsrp;
6153 + dm_tokdata_t *tdp;
6154 + dm_eventset_t eventset;
6155 + int error;
6156 + unsigned long lc1; /* first lock cookie */
6157 + unsigned long lc2; /* second lock cookie */
6158 + u_int i;
6159 +
6160 + /* Copy in and validate the event mask. Only the lower maxevent bits
6161 + are meaningful, so clear any bits set above maxevent.
6162 + */
6163 +
6164 + if (maxevent == 0 || maxevent > DM_EVENT_MAX)
6165 + return(-EINVAL);
6166 + if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
6167 + return(-EFAULT);
6168 + eventset &= (1 << maxevent) - 1;
6169 +
6170 + /* If the caller specified the global handle, then the only valid token
6171 + is DM_NO_TOKEN, and the only valid event in the event mask is
6172 + DM_EVENT_MOUNT. If it is set, add the session to the list of
6173 + sessions that want to receive mount events. If it is clear, remove
6174 + the session from the list. Since DM_EVENT_MOUNT events never block
6175 + waiting for a session to register, there is no one to wake up if we
6176 + do add the session to the list.
6177 + */
6178 +
6179 + if (DM_GLOBALHAN(hanp, hlen)) {
6180 + if (token != DM_NO_TOKEN)
6181 + return(-EINVAL);
6182 + if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
6183 + return(error);
6184 + if (eventset == 0) {
6185 + s->sn_flags &= ~DM_SN_WANTMOUNT;
6186 + error = 0;
6187 + } else if (eventset == 1 << DM_EVENT_MOUNT) {
6188 + s->sn_flags |= DM_SN_WANTMOUNT;
6189 + error = 0;
6190 + } else {
6191 + error = -EINVAL;
6192 + }
6193 + mutex_spinunlock(&s->sn_qlock, lc1);
6194 + return(error);
6195 + }
6196 +
6197 + /* Since it's not the global handle, it had better be a filesystem
6198 + handle. Verify that the first 'maxevent' events in the event list
6199 + are all valid for a filesystem handle.
6200 + */
6201 +
6202 + if (eventset & ~DM_VALID_DISP_EVENTS)
6203 + return(-EINVAL);
6204 +
6205 + /* Verify that the session is valid, that the handle is a filesystem
6206 + handle, and that the filesystem is capable of sending events. (If
6207 + a dm_fsreg_t structure exists, then the filesystem can issue events.)
6208 + */
6209 +
6210 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
6211 + DM_RIGHT_EXCL, &tdp);
6212 + if (error != 0)
6213 + return(error);
6214 +
6215 + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1);
6216 + if (fsrp == NULL) {
6217 + dm_app_put_tdp(tdp);
6218 + return(-EINVAL);
6219 + }
6220 +
6221 + /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
6222 + it can't disappear while we add it to the filesystem's event mask.
6223 + */
6224 +
6225 + if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
6226 + mutex_spinunlock(&fsrp->fr_lock, lc1);
6227 + dm_app_put_tdp(tdp);
6228 + return(error);
6229 + }
6230 +
6231 + /* Update the event disposition array for this filesystem, adding
6232 + and/or removing the session as appropriate. If this session is
6233 + dropping registration for DM_EVENT_DESTROY, or is overriding some
6234 + other session's registration for DM_EVENT_DESTROY, then clear
6235 + any attr-on-destroy attribute name also.
6236 + */
6237 +
6238 + for (i = 0; i < DM_EVENT_MAX; i++) {
6239 + if (DMEV_ISSET(i, eventset)) {
6240 + if (i == DM_EVENT_DESTROY && fsrp->fr_sessp[i] != s)
6241 + memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
6242 + fsrp->fr_sessp[i] = s;
6243 + } else if (fsrp->fr_sessp[i] == s) {
6244 + if (i == DM_EVENT_DESTROY)
6245 + memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
6246 + fsrp->fr_sessp[i] = NULL;
6247 + }
6248 + }
6249 + mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
6250 +
6251 + /* Wake up all processes waiting for a disposition on this filesystem
6252 + in case any of them happen to be waiting for an event which we just
6253 + added.
6254 + */
6255 +
6256 + if (fsrp->fr_dispcnt)
6257 + sv_broadcast(&fsrp->fr_dispq);
6258 +
6259 + mutex_spinunlock(&fsrp->fr_lock, lc1);
6260 +
6261 + dm_app_put_tdp(tdp);
6262 + return(0);
6263 +}
6264 +
6265 +
6266 +/*
6267 + * Register a specific attribute name with a filesystem. The value of
6268 + * the attribute is to be returned with an asynchronous destroy event.
6269 + */
6270 +
6271 +int
6272 +dm_set_return_on_destroy(
6273 + dm_sessid_t sid,
6274 + void __user *hanp,
6275 + size_t hlen,
6276 + dm_token_t token,
6277 + dm_attrname_t __user *attrnamep,
6278 + dm_boolean_t enable)
6279 +{
6280 + dm_attrname_t attrname;
6281 + dm_tokdata_t *tdp;
6282 + dm_fsreg_t *fsrp;
6283 + dm_session_t *s;
6284 + int error;
6285 + unsigned long lc1; /* first lock cookie */
6286 + unsigned long lc2; /* second lock cookie */
6287 +
6288 + /* If a dm_attrname_t is provided, copy it in and validate it. */
6289 +
6290 + if (enable && (error = copy_from_user(&attrname, attrnamep, sizeof(attrname))) != 0)
6291 + return(error);
6292 +
6293 + /* Validate the filesystem handle and use it to get the filesystem's
6294 + disposition structure.
6295 + */
6296 +
6297 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
6298 + DM_RIGHT_EXCL, &tdp);
6299 + if (error != 0)
6300 + return(error);
6301 +
6302 + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc1);
6303 + if (fsrp == NULL) {
6304 + dm_app_put_tdp(tdp);
6305 + return(-EINVAL);
6306 + }
6307 +
6308 + /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
6309 + it can't disappear while we add it to the filesystem's event mask.
6310 + */
6311 +
6312 + if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
6313 + mutex_spinunlock(&fsrp->fr_lock, lc1);
6314 + dm_app_put_tdp(tdp);
6315 + return(error);
6316 + }
6317 +
6318 + /* A caller cannot disable return-on-destroy if he is not registered
6319 + for DM_EVENT_DESTROY. Enabling return-on-destroy is an implicit
6320 + dm_set_disp() for DM_EVENT_DESTROY; we wake up all processes
6321 + waiting for a disposition in case any was waiting for a
6322 + DM_EVENT_DESTROY event.
6323 + */
6324 +
6325 + error = 0;
6326 + if (enable) {
6327 + fsrp->fr_sessp[DM_EVENT_DESTROY] = s;
6328 + fsrp->fr_rattr = attrname;
6329 + if (fsrp->fr_dispcnt)
6330 + sv_broadcast(&fsrp->fr_dispq);
6331 + } else if (fsrp->fr_sessp[DM_EVENT_DESTROY] != s) {
6332 + error = -EINVAL;
6333 + } else {
6334 + memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
6335 + }
6336 + mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
6337 + mutex_spinunlock(&fsrp->fr_lock, lc1);
6338 + dm_app_put_tdp(tdp);
6339 + return(error);
6340 +}
6341 +
6342 +
6343 +int
6344 +dm_get_mountinfo(
6345 + dm_sessid_t sid,
6346 + void __user *hanp,
6347 + size_t hlen,
6348 + dm_token_t token,
6349 + size_t buflen,
6350 + void __user *bufp,
6351 + size_t __user *rlenp)
6352 +{
6353 + dm_fsreg_t *fsrp;
6354 + dm_tokdata_t *tdp;
6355 + int error;
6356 + unsigned long lc; /* lock cookie */
6357 +
6358 + /* Make sure that the caller's buffer is 8-byte aligned. */
6359 +
6360 + if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
6361 + return(-EFAULT);
6362 +
6363 + /* Verify that the handle is a filesystem handle, and that the
6364 + filesystem is capable of sending events. If not, return an error.
6365 + */
6366 +
6367 + error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
6368 + DM_RIGHT_SHARED, &tdp);
6369 + if (error != 0)
6370 + return(error);
6371 +
6372 + /* Find the filesystem entry. This should always succeed as the
6373 + dm_app_get_tdp call created a filesystem reference. Once we find
6374 + the entry, drop the lock. The mountinfo message is never modified,
6375 + the filesystem entry can't disappear, and we don't want to hold a
6376 + spinlock while doing copyout calls.
6377 + */
6378 +
6379 + fsrp = dm_find_fsreg_and_lock(&tdp->td_handle.ha_fsid, &lc);
6380 + if (fsrp == NULL) {
6381 + dm_app_put_tdp(tdp);
6382 + return(-EINVAL);
6383 + }
6384 + mutex_spinunlock(&fsrp->fr_lock, lc);
6385 +
6386 + /* Copy the message into the user's buffer and update his 'rlenp'. */
6387 +
6388 + if (put_user(fsrp->fr_msgsize, rlenp)) {
6389 + error = -EFAULT;
6390 + } else if (fsrp->fr_msgsize > buflen) { /* user buffer not big enough */
6391 + error = -E2BIG;
6392 + } else if (copy_to_user(bufp, fsrp->fr_msg, fsrp->fr_msgsize)) {
6393 + error = -EFAULT;
6394 + } else {
6395 + error = 0;
6396 + }
6397 + dm_app_put_tdp(tdp);
6398 + return(error);
6399 +}
6400 +
6401 +
6402 +int
6403 +dm_getall_disp(
6404 + dm_sessid_t sid,
6405 + size_t buflen,
6406 + void __user *bufp,
6407 + size_t __user *rlenp)
6408 +{
6409 + dm_session_t *s; /* pointer to session given by sid */
6410 + unsigned long lc1; /* first lock cookie */
6411 + unsigned long lc2; /* second lock cookie */
6412 + int totalsize;
6413 + int msgsize;
6414 + int fsyscnt;
6415 + dm_dispinfo_t *prevmsg;
6416 + dm_fsreg_t *fsrp;
6417 + int error;
6418 + char *kbuf;
6419 +
6420 + int tmp3;
6421 + int tmp4;
6422 +
6423 + /* Because the dm_getall_disp structure contains a __u64 field,
6424 + make sure that the buffer provided by the caller is aligned so
6425 + that he can read such fields successfully.
6426 + */
6427 +
6428 + if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
6429 + return(-EFAULT);
6430 +
6431 + /* Compute the size of a dm_dispinfo structure, rounding up to an
6432 + 8-byte boundary so that any subsequent structures will also be
6433 + aligned.
6434 + */
6435 +
6436 +#if 0
6437 + /* XXX ug, what is going on here? */
6438 + msgsize = (sizeof(dm_dispinfo_t) + DM_FSHSIZE + sizeof(uint64_t) - 1) &
6439 + ~(sizeof(uint64_t) - 1);
6440 +#else
6441 + tmp3 = sizeof(dm_dispinfo_t) + DM_FSHSIZE;
6442 + tmp3 += sizeof(__u64);
6443 + tmp3 -= 1;
6444 + tmp4 = ~((int)sizeof(__u64) - 1);
6445 + msgsize = tmp3 & tmp4;
6446 +#endif
6447 +
6448 + /* Loop until we can get the right amount of temp space, being careful
6449 + not to hold a mutex during the allocation. Usually only one trip.
6450 + */
6451 +
6452 + for (;;) {
6453 + if ((fsyscnt = dm_fsys_cnt) == 0) {
6454 + /*if (dm_cpoutsizet(rlenp, 0))*/
6455 + if (put_user(0,rlenp))
6456 + return(-EFAULT);
6457 + return(0);
6458 + }
6459 + kbuf = kmalloc(fsyscnt * msgsize, GFP_KERNEL);
6460 + if (kbuf == NULL) {
6461 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
6462 + return -ENOMEM;
6463 + }
6464 +
6465 + lc1 = mutex_spinlock(&dm_reg_lock);
6466 + if (fsyscnt == dm_fsys_cnt)
6467 + break;
6468 +
6469 + mutex_spinunlock(&dm_reg_lock, lc1);
6470 + kfree(kbuf);
6471 + }
6472 +
6473 + /* Find the indicated session and lock it. */
6474 +
6475 + if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
6476 + mutex_spinunlock(&dm_reg_lock, lc1);
6477 + kfree(kbuf);
6478 + return(error);
6479 + }
6480 +
6481 + /* Create a dm_dispinfo structure for each filesystem in which
6482 + this session has at least one event selected for disposition.
6483 + */
6484 +
6485 + totalsize = 0; /* total bytes to transfer to the user */
6486 + prevmsg = NULL;
6487 +
6488 + for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
6489 + dm_dispinfo_t *disp;
6490 + int event;
6491 + int found;
6492 +
6493 + disp = (dm_dispinfo_t *)(kbuf + totalsize);
6494 +
6495 + DMEV_ZERO(disp->di_eventset);
6496 +
6497 + for (event = 0, found = 0; event < DM_EVENT_MAX; event++) {
6498 + if (fsrp->fr_sessp[event] != s)
6499 + continue;
6500 + DMEV_SET(event, disp->di_eventset);
6501 + found++;
6502 + }
6503 + if (!found)
6504 + continue;
6505 +
6506 + disp->_link = 0;
6507 + disp->di_fshandle.vd_offset = sizeof(dm_dispinfo_t);
6508 + disp->di_fshandle.vd_length = DM_FSHSIZE;
6509 +
6510 + memcpy((char *)disp + disp->di_fshandle.vd_offset,
6511 + &fsrp->fr_fsid, disp->di_fshandle.vd_length);
6512 +
6513 + if (prevmsg)
6514 + prevmsg->_link = msgsize;
6515 +
6516 + prevmsg = disp;
6517 + totalsize += msgsize;
6518 + }
6519 + mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
6520 + mutex_spinunlock(&dm_reg_lock, lc1);
6521 +
6522 + if (put_user(totalsize, rlenp)) {
6523 + error = -EFAULT;
6524 + } else if (totalsize > buflen) { /* no more room */
6525 + error = -E2BIG;
6526 + } else if (totalsize && copy_to_user(bufp, kbuf, totalsize)) {
6527 + error = -EFAULT;
6528 + } else {
6529 + error = 0;
6530 + }
6531 +
6532 + kfree(kbuf);
6533 + return(error);
6534 +}
6535 +
6536 +int
6537 +dm_open_by_handle_rvp(
6538 + unsigned int fd,
6539 + void __user *hanp,
6540 + size_t hlen,
6541 + int flags,
6542 + int *rvp)
6543 +{
6544 + dm_handle_t handle;
6545 + int error;
6546 + short td_type;
6547 + struct dentry *dentry;
6548 + struct inode *inodep;
6549 + int new_fd;
6550 + struct file *mfilp;
6551 + struct file *filp;
6552 +
6553 + if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) {
6554 + return(error);
6555 + }
6556 +
6557 + if ((inodep = dm_handle_to_ip(&handle, &td_type)) == NULL) {
6558 + return(-EBADF);
6559 + }
6560 + if ((td_type == DM_TDT_VFS) || (td_type == DM_TDT_OTH)) {
6561 + iput(inodep);
6562 + return(-EBADF);
6563 + }
6564 +
6565 + if ((new_fd = get_unused_fd()) < 0) {
6566 + iput(inodep);
6567 + return(-EMFILE);
6568 + }
6569 +
6570 + dentry = d_alloc_anon(inodep);
6571 + if (dentry == NULL) {
6572 + iput(inodep);
6573 + put_unused_fd(new_fd);
6574 + return(-ENOMEM);
6575 + }
6576 +
6577 + mfilp = fget(fd);
6578 + if (!mfilp) {
6579 + dput(dentry);
6580 + put_unused_fd(new_fd);
6581 + return(-EBADF);
6582 + }
6583 +
6584 + mntget(mfilp->f_vfsmnt);
6585 +
6586 + /* Create file pointer */
6587 + filp = dentry_open(dentry, mfilp->f_vfsmnt, flags);
6588 + if (IS_ERR(filp)) {
6589 + put_unused_fd(new_fd);
6590 + fput(mfilp);
6591 + return PTR_ERR(filp);
6592 + }
6593 +
6594 + if (td_type == DM_TDT_REG) {
6595 + struct filesystem_dmapi_operations *dmapiops;
6596 + dmapiops = dm_fsys_ops(inodep->i_sb);
6597 + if (dmapiops && dmapiops->get_invis_ops) {
6598 + /* invisible operation should not change atime */
6599 + filp->f_flags |= O_NOATIME;
6600 + filp->f_op = dmapiops->get_invis_ops(inodep);
6601 + }
6602 + }
6603 + fd_install(new_fd, filp);
6604 + fput(mfilp);
6605 + *rvp = new_fd;
6606 + return 0;
6607 +}
6608 +
6609 +
6610 +#ifdef HAVE_DM_QUEUE_FLUSH
6611 +/* Find the threads that have a reference to our filesystem and force
6612 + them to return with the specified errno.
6613 + We look for them in each dm_fsreg_t's fr_evt_dispq.
6614 +*/
6615 +
6616 +int
6617 +dm_release_disp_threads(
6618 + dm_fsid_t *fsidp,
6619 + struct inode *inode, /* may be null */
6620 + int errno)
6621 +{
6622 + unsigned long lc;
6623 + dm_fsreg_t *fsrp;
6624 + dm_tokevent_t *tevp;
6625 + dm_tokdata_t *tdp;
6626 + dm_eventq_t *queue;
6627 + int found_events = 0;
6628 +
6629 + if ((fsrp = dm_find_fsreg_and_lock(fsidp, &lc)) == NULL){
6630 + return 0;
6631 + }
6632 +
6633 + queue = &fsrp->fr_evt_dispq;
6634 + for (tevp = queue->eq_head; tevp; tevp = tevp->te_next) {
6635 + nested_spinlock(&tevp->te_lock);
6636 + if (inode) {
6637 + for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
6638 + if( tdp->td_ip == inode ) {
6639 + tevp->te_flags |= DM_TEF_FLUSH;
6640 + tevp->te_reply = errno;
6641 + found_events = 1;
6642 + break;
6643 + }
6644 + }
6645 + }
6646 + else {
6647 + tevp->te_flags |= DM_TEF_FLUSH;
6648 + tevp->te_reply = errno;
6649 + found_events = 1;
6650 + }
6651 + nested_spinunlock(&tevp->te_lock);
6652 + }
6653 +
6654 + if (found_events && fsrp->fr_dispcnt)
6655 + sv_broadcast(&fsrp->fr_dispq);
6656 + mutex_spinunlock(&fsrp->fr_lock, lc);
6657 + return 0;
6658 +}
6659 +#endif /* HAVE_DM_QUEUE_FLUSH */
6660 Index: linux-2.6.26/fs/dmapi/dmapi_right.c
6661 ===================================================================
6662 --- /dev/null
6663 +++ linux-2.6.26/fs/dmapi/dmapi_right.c
6664 @@ -0,0 +1,1256 @@
6665 +/*
6666 + * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
6667 + *
6668 + * This program is free software; you can redistribute it and/or modify it
6669 + * under the terms of version 2 of the GNU General Public License as
6670 + * published by the Free Software Foundation.
6671 + *
6672 + * This program is distributed in the hope that it would be useful, but
6673 + * WITHOUT ANY WARRANTY; without even the implied warranty of
6674 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
6675 + *
6676 + * Further, this software is distributed without any warranty that it is
6677 + * free of the rightful claim of any third person regarding infringement
6678 + * or the like. Any license provided herein, whether implied or
6679 + * otherwise, applies only to this software file. Patent licenses, if
6680 + * any, provided herein do not apply to combinations of this program with
6681 + * other software, or any other product whatsoever.
6682 + *
6683 + * You should have received a copy of the GNU General Public License along
6684 + * with this program; if not, write the Free Software Foundation, Inc., 59
6685 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
6686 + *
6687 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
6688 + * Mountain View, CA 94043, or:
6689 + *
6690 + * http://www.sgi.com
6691 + *
6692 + * For further information regarding this notice, see:
6693 + *
6694 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
6695 + */
6696 +#include <asm/uaccess.h>
6697 +#include "dmapi.h"
6698 +#include "dmapi_kern.h"
6699 +#include "dmapi_private.h"
6700 +
6701 +
6702 +#define DM_FG_STHREAD 0x001 /* keep other threads from using tdp */
6703 +#define DM_FG_MUSTEXIST 0x002 /* handle must exist in the event */
6704 +#define DM_FG_DONTADD 0x004 /* don't add handle if not in event */
6705 +
6706 +/* Get a handle of the form (void *, size_t) from user space and convert it to
6707 + a handle_t. Do as much validation of the result as possible; any error
6708 + other than a bad address should return EBADF per the DMAPI spec.
6709 +*/
6710 +
6711 +int
6712 +dm_copyin_handle(
6713 + void __user *hanp, /* input, handle data */
6714 + size_t hlen, /* input, size of handle data */
6715 + dm_handle_t *handlep) /* output, copy of data */
6716 +{
6717 + u_short len;
6718 + dm_fid_t *fidp;
6719 +
6720 + fidp = (dm_fid_t*)&handlep->ha_fid;
6721 +
6722 + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
6723 + return -EBADF;
6724 +
6725 + if (copy_from_user(handlep, hanp, hlen))
6726 + return -EFAULT;
6727 +
6728 + if (hlen < sizeof(*handlep))
6729 + memset((char *)handlep + hlen, 0, sizeof(*handlep) - hlen);
6730 +
6731 + if (hlen == sizeof(handlep->ha_fsid))
6732 + return 0; /* FS handle, nothing more to check */
6733 +
6734 + len = hlen - sizeof(handlep->ha_fsid) - sizeof(fidp->dm_fid_len);
6735 +
6736 + if ((fidp->dm_fid_len != len) || fidp->dm_fid_pad)
6737 + return -EBADF;
6738 + return 0;
6739 +}
6740 +
6741 +/* Allocate and initialize a tevp structure. Called from both application and
6742 + event threads.
6743 +*/
6744 +
6745 +static dm_tokevent_t *
6746 +dm_init_tevp(
6747 + int ev_size, /* size of event structure */
6748 + int var_size) /* size of variable-length data */
6749 +{
6750 + dm_tokevent_t *tevp;
6751 + int msgsize;
6752 +
6753 + /* Calculate the size of the event in bytes and allocate memory for it.
6754 + Zero all but the variable portion of the message, which will be
6755 + eventually overlaid by the caller with data.
6756 + */
6757 +
6758 + msgsize = offsetof(dm_tokevent_t, te_event) + ev_size + var_size;
6759 + tevp = kmalloc(msgsize, GFP_KERNEL);
6760 + if (tevp == NULL) {
6761 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
6762 + return NULL;
6763 + }
6764 + memset(tevp, 0, msgsize - var_size);
6765 +
6766 + /* Now initialize all the non-zero fields. */
6767 +
6768 + spinlock_init(&tevp->te_lock, "te_lock");
6769 + sv_init(&tevp->te_evt_queue, SV_DEFAULT, "te_evt_queue");
6770 + sv_init(&tevp->te_app_queue, SV_DEFAULT, "te_app_queue");
6771 + tevp->te_allocsize = msgsize;
6772 + tevp->te_msg.ev_type = DM_EVENT_INVALID;
6773 + tevp->te_flags = 0;
6774 +
6775 + return(tevp);
6776 +}
6777 +
6778 +
6779 +/* Given the event type and the number of bytes of variable length data that
6780 + will follow the event, dm_evt_create_tevp() creates a dm_tokevent_t
6781 + structure to hold the event and initializes all the common event fields.
6782 +
6783 + No locking is required for this routine because the caller is an event
6784 + thread, and is therefore the only thread that can see the event.
6785 +*/
6786 +
6787 +dm_tokevent_t *
6788 +dm_evt_create_tevp(
6789 + dm_eventtype_t event,
6790 + int variable_size,
6791 + void **msgpp)
6792 +{
6793 + dm_tokevent_t *tevp;
6794 + int evsize;
6795 +
6796 + switch (event) {
6797 + case DM_EVENT_READ:
6798 + case DM_EVENT_WRITE:
6799 + case DM_EVENT_TRUNCATE:
6800 + evsize = sizeof(dm_data_event_t);
6801 + break;
6802 +
6803 + case DM_EVENT_DESTROY:
6804 + evsize = sizeof(dm_destroy_event_t);
6805 + break;
6806 +
6807 + case DM_EVENT_MOUNT:
6808 + evsize = sizeof(dm_mount_event_t);
6809 + break;
6810 +
6811 + case DM_EVENT_PREUNMOUNT:
6812 + case DM_EVENT_UNMOUNT:
6813 + case DM_EVENT_NOSPACE:
6814 + case DM_EVENT_CREATE:
6815 + case DM_EVENT_REMOVE:
6816 + case DM_EVENT_RENAME:
6817 + case DM_EVENT_SYMLINK:
6818 + case DM_EVENT_LINK:
6819 + case DM_EVENT_POSTCREATE:
6820 + case DM_EVENT_POSTREMOVE:
6821 + case DM_EVENT_POSTRENAME:
6822 + case DM_EVENT_POSTSYMLINK:
6823 + case DM_EVENT_POSTLINK:
6824 + case DM_EVENT_ATTRIBUTE:
6825 + case DM_EVENT_DEBUT: /* currently not supported */
6826 + case DM_EVENT_CLOSE: /* currently not supported */
6827 + evsize = sizeof(dm_namesp_event_t);
6828 + break;
6829 +
6830 + case DM_EVENT_CANCEL: /* currently not supported */
6831 + evsize = sizeof(dm_cancel_event_t);
6832 + break;
6833 +
6834 + case DM_EVENT_USER:
6835 + evsize = 0;
6836 + break;
6837 +
6838 + default:
6839 + panic("dm_create_tevp: called with unknown event type %d\n",
6840 + event);
6841 + }
6842 +
6843 + /* Allocate and initialize an event structure of the correct size. */
6844 +
6845 + tevp = dm_init_tevp(evsize, variable_size);
6846 + if (tevp == NULL)
6847 + return NULL;
6848 + tevp->te_evt_ref = 1;
6849 +
6850 + /* Fields ev_token, ev_sequence, and _link are all filled in when the
6851 + event is queued onto a session. Initialize all other fields here.
6852 + */
6853 +
6854 + tevp->te_msg.ev_type = event;
6855 + tevp->te_msg.ev_data.vd_offset = offsetof(dm_tokevent_t, te_event) -
6856 + offsetof(dm_tokevent_t, te_msg);
6857 + tevp->te_msg.ev_data.vd_length = evsize + variable_size;
6858 +
6859 + /* Give the caller a pointer to the event-specific structure. */
6860 +
6861 + *msgpp = ((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
6862 + return(tevp);
6863 +}
6864 +
6865 +
6866 +/* Given a pointer to an event (tevp) and a pointer to a handle_t, look for a
6867 + tdp structure within the event which contains the handle_t. Either verify
6868 + that the event contains the tdp, or optionally add the tdp to the
6869 + event. Called only from application threads.
6870 +
6871 + On entry, tevp->te_lock is held; it is dropped prior to return.
6872 +*/
6873 +
6874 +static int
6875 +dm_app_lookup_tdp(
6876 + dm_handle_t *handlep, /* the handle we are looking for */
6877 + dm_tokevent_t *tevp, /* the event to search for the handle */
6878 + unsigned long *lcp, /* address of active lock cookie */
6879 + short types, /* acceptable object types */
6880 + dm_right_t right, /* minimum right the object must have */
6881 + u_int flags,
6882 + dm_tokdata_t **tdpp) /* if ! NULL, pointer to matching tdp */
6883 +{
6884 + dm_fsys_vector_t *fsys_vector;
6885 + dm_tokdata_t *tdp;
6886 + struct inode *ip;
6887 + int error;
6888 +
6889 + /* Bump the tevp application reference counter so that the event
6890 + can't disappear in case we have to drop the lock for a while.
6891 + */
6892 +
6893 + tevp->te_app_ref++;
6894 + *tdpp = NULL; /* assume failure */
6895 +
6896 + for (;;) {
6897 + /* Look for a matching tdp in the tevp. */
6898 +
6899 + for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
6900 + if (DM_HANDLE_CMP(&tdp->td_handle, handlep) == 0)
6901 + break;
6902 + }
6903 +
6904 + /* If the tdp exists, but either we need single-thread access
6905 + to the handle and can't get it, or some other thread already
6906 + has single-thread access, then sleep until we can try again.
6907 + */
6908 +
6909 + if (tdp != NULL && tdp->td_app_ref &&
6910 + ((flags & DM_FG_STHREAD) ||
6911 + (tdp->td_flags & DM_TDF_STHREAD))) {
6912 + tevp->te_app_slp++;
6913 + sv_wait(&tevp->te_app_queue, 1,
6914 + &tevp->te_lock, *lcp);
6915 + *lcp = mutex_spinlock(&tevp->te_lock);
6916 + tevp->te_app_slp--;
6917 + continue;
6918 + }
6919 +
6920 + if (tdp != NULL &&
6921 + (tdp->td_vcount > 0 || tdp->td_flags & DM_TDF_EVTREF)) {
6922 + /* We have an existing tdp with a non-zero inode
6923 + reference count. If it's the wrong type, return
6924 + an appropriate errno.
6925 + */
6926 +
6927 + if (!(tdp->td_type & types)) {
6928 + mutex_spinunlock(&tevp->te_lock, *lcp);
6929 + dm_put_tevp(tevp, NULL); /* no destroy events */
6930 + return(-EOPNOTSUPP);
6931 + }
6932 +
6933 + /* If the current access right isn't high enough,
6934 + complain.
6935 + */
6936 +
6937 + if (tdp->td_right < right) {
6938 + mutex_spinunlock(&tevp->te_lock, *lcp);
6939 + dm_put_tevp(tevp, NULL); /* no destroy events */
6940 + return(-EACCES);
6941 + }
6942 +
6943 + /* The handle is acceptable. Increment the tdp
6944 + application and inode references and mark the tdp
6945 + as single-threaded if necessary.
6946 + */
6947 +
6948 + tdp->td_app_ref++;
6949 + if (flags & DM_FG_STHREAD)
6950 + tdp->td_flags |= DM_TDF_STHREAD;
6951 + tdp->td_vcount++;
6952 +
6953 + fsys_vector = dm_fsys_vector(tdp->td_ip);
6954 + (void)fsys_vector->obj_ref_hold(tdp->td_ip);
6955 +
6956 + mutex_spinunlock(&tevp->te_lock, *lcp);
6957 + *tdpp = tdp;
6958 + return(0);
6959 + }
6960 +
6961 + /* If the tdp is not in the tevp or does not have an inode
6962 + reference, check to make sure it is okay to add/update it.
6963 + */
6964 +
6965 + if (flags & DM_FG_MUSTEXIST) {
6966 + mutex_spinunlock(&tevp->te_lock, *lcp);
6967 + dm_put_tevp(tevp, NULL); /* no destroy events */
6968 + return(-EACCES); /* i.e. an insufficient right */
6969 + }
6970 + if (flags & DM_FG_DONTADD) {
6971 + tevp->te_app_ref--;
6972 + mutex_spinunlock(&tevp->te_lock, *lcp);
6973 + return(0);
6974 + }
6975 +
6976 + /* If a tdp structure doesn't yet exist, create one and link
6977 + it into the tevp. Drop the lock while we are doing this as
6978 + zallocs can go to sleep. Once we have the memory, make
6979 + sure that another thread didn't simultaneously add the same
6980 + handle to the same event. If so, toss ours and start over.
6981 + */
6982 +
6983 + if (tdp == NULL) {
6984 + dm_tokdata_t *tmp;
6985 +
6986 + mutex_spinunlock(&tevp->te_lock, *lcp);
6987 +
6988 + tdp = kmem_cache_alloc(dm_tokdata_cachep, GFP_KERNEL);
6989 + if (tdp == NULL){
6990 + printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
6991 + return(-ENOMEM);
6992 + }
6993 + memset(tdp, 0, sizeof(*tdp));
6994 +
6995 + *lcp = mutex_spinlock(&tevp->te_lock);
6996 +
6997 + for (tmp = tevp->te_tdp; tmp; tmp = tmp->td_next) {
6998 + if (DM_HANDLE_CMP(&tmp->td_handle, handlep) == 0)
6999 + break;
7000 + }
7001 + if (tmp) {
7002 + kmem_cache_free(dm_tokdata_cachep, tdp);
7003 + continue;
7004 + }
7005 +
7006 + tdp->td_next = tevp->te_tdp;
7007 + tevp->te_tdp = tdp;
7008 + tdp->td_tevp = tevp;
7009 + tdp->td_handle = *handlep;
7010 + }
7011 +
7012 + /* Temporarily single-thread access to the tdp so that other
7013 + threads don't touch it while we are filling the rest of the
7014 + fields in.
7015 + */
7016 +
7017 + tdp->td_app_ref = 1;
7018 + tdp->td_flags |= DM_TDF_STHREAD;
7019 +
7020 + /* Drop the spinlock while we access, validate, and obtain the
7021 + proper rights to the object. This can take a very long time
7022 + if the inode is not in memory, if the filesystem is
7023 + unmounting, or if the request_right() call should block
7024 + because some other tdp or kernel thread is holding a right.
7025 + */
7026 +
7027 + mutex_spinunlock(&tevp->te_lock, *lcp);
7028 +
7029 + if ((ip = dm_handle_to_ip(handlep, &tdp->td_type)) == NULL) {
7030 + error = -EBADF;
7031 + } else {
7032 + tdp->td_vcount = 1;
7033 + tdp->td_ip = ip;
7034 +
7035 + /* The handle is usable. Check that the type of the
7036 + object matches one of the types that the caller
7037 + will accept.
7038 + */
7039 +
7040 + if (!(types & tdp->td_type)) {
7041 + error = -EOPNOTSUPP;
7042 + } else if (right > DM_RIGHT_NULL) {
7043 + /* Attempt to get the rights required by the
7044 + caller. If rights can't be obtained, return
7045 + an error.
7046 + */
7047 +
7048 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7049 + error = fsys_vector->request_right(tdp->td_ip,
7050 + DM_RIGHT_NULL,
7051 + (tdp->td_type == DM_TDT_VFS ?
7052 + DM_FSYS_OBJ : 0),
7053 + DM_RR_WAIT, right);
7054 + if (!error) {
7055 + tdp->td_right = right;
7056 + }
7057 + } else {
7058 + error = 0;
7059 + }
7060 + }
7061 + if (error != 0) {
7062 + dm_put_tevp(tevp, tdp); /* destroy event risk, although tiny */
7063 + return(error);
7064 + }
7065 +
7066 + *lcp = mutex_spinlock(&tevp->te_lock);
7067 +
7068 + /* Wake up any threads which may have seen our tdp while we
7069 + were filling it in.
7070 + */
7071 +
7072 + if (!(flags & DM_FG_STHREAD)) {
7073 + tdp->td_flags &= ~DM_TDF_STHREAD;
7074 + if (tevp->te_app_slp)
7075 + sv_broadcast(&tevp->te_app_queue);
7076 + }
7077 +
7078 + mutex_spinunlock(&tevp->te_lock, *lcp);
7079 + *tdpp = tdp;
7080 + return(0);
7081 + }
7082 +}
7083 +
7084 +
7085 +/* dm_app_get_tdp_by_token() is called whenever the application request
7086 + contains a session ID and contains a token other than DM_NO_TOKEN.
7087 + Most of the callers provide a right that is either DM_RIGHT_SHARED or
7088 + DM_RIGHT_EXCL, but a few of the callers such as dm_obj_ref_hold() may
7089 + specify a right of DM_RIGHT_NULL.
7090 +*/
7091 +
7092 +static int
7093 +dm_app_get_tdp_by_token(
7094 + dm_sessid_t sid, /* an existing session ID */
7095 + void __user *hanp,
7096 + size_t hlen,
7097 + dm_token_t token, /* an existing token */
7098 + short types, /* acceptable object types */
7099 + dm_right_t right, /* minimum right the object must have */
7100 + u_int flags,
7101 + dm_tokdata_t **tdpp)
7102 +{
7103 + dm_tokevent_t *tevp;
7104 + dm_handle_t handle;
7105 + int error;
7106 + unsigned long lc; /* lock cookie */
7107 +
7108 + if (right < DM_RIGHT_NULL || right > DM_RIGHT_EXCL)
7109 + return(-EINVAL);
7110 +
7111 + if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
7112 + return(error);
7113 +
7114 + /* Find and lock the event which corresponds to the specified
7115 + session/token pair.
7116 + */
7117 +
7118 + if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
7119 + return(error);
7120 +
7121 + return(dm_app_lookup_tdp(&handle, tevp, &lc, types,
7122 + right, flags, tdpp));
7123 +}
7124 +
7125 +
7126 +/* Function dm_app_get_tdp() must ONLY be called from routines associated with
7127 + application calls, e.g. dm_read_invis, dm_set_disp, etc. It must not be
7128 + called by a thread responsible for generating an event such as
7129 + dm_send_data_event()!
7130 +
7131 + dm_app_get_tdp() is the interface used by all application calls other than
7132 + dm_get_events, dm_respond_event, dm_get_config, dm_get_config_events, and by
7133 + the dm_obj_ref_* and dm_*_right families of requests.
7134 +
7135 + dm_app_get_tdp() converts a sid/hanp/hlen/token quad into a tdp pointer,
7136 + increments the number of active application threads in the event, and
7137 + increments the number of active application threads using the tdp. The
7138 + 'right' parameter must be either DM_RIGHT_SHARED or DM_RIGHT_EXCL. The
7139 + token may either be DM_NO_TOKEN, or can be a token received in a synchronous
7140 + event.
7141 +*/
7142 +
7143 +int
7144 +dm_app_get_tdp(
7145 + dm_sessid_t sid,
7146 + void __user *hanp,
7147 + size_t hlen,
7148 + dm_token_t token,
7149 + short types,
7150 + dm_right_t right, /* minimum right */
7151 + dm_tokdata_t **tdpp)
7152 +{
7153 + dm_session_t *s;
7154 + dm_handle_t handle;
7155 + dm_tokevent_t *tevp;
7156 + int error;
7157 + unsigned long lc; /* lock cookie */
7158 +
7159 + ASSERT(right >= DM_RIGHT_SHARED);
7160 +
7161 + /* If a token other than DM_NO_TOKEN is specified, find the event on
7162 + this session which owns the token and increment its reference count.
7163 + */
7164 +
7165 + if (token != DM_NO_TOKEN) { /* look up existing tokevent struct */
7166 + return(dm_app_get_tdp_by_token(sid, hanp, hlen, token, types,
7167 + right, DM_FG_MUSTEXIST, tdpp));
7168 + }
7169 +
7170 + /* The token is DM_NO_TOKEN. In this case we only want to verify that
7171 + the session ID is valid, and do not need to continue holding the
7172 + session lock after we know that to be true.
7173 + */
7174 +
7175 + if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
7176 + return(error);
7177 +
7178 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
7179 + return(error);
7180 + mutex_spinunlock(&s->sn_qlock, lc);
7181 +
7182 + /* When DM_NO_TOKEN is used, we simply block until we can obtain the
7183 + right that we want (since the tevp contains no tdp structures).
7184 + The blocking when we eventually support it will occur within
7185 + fsys_vector->request_right().
7186 + */
7187 +
7188 + tevp = dm_init_tevp(0, 0);
7189 + lc = mutex_spinlock(&tevp->te_lock);
7190 +
7191 + return(dm_app_lookup_tdp(&handle, tevp, &lc, types, right, 0, tdpp));
7192 +}
7193 +
7194 +
7195 +/* dm_get_config_tdp() is only called by dm_get_config() and
7196 + dm_get_config_events(), which neither have a session ID nor a token.
7197 + Both of these calls are supposed to work even if the filesystem is in the
7198 + process of being mounted, as long as the caller only uses handles within
7199 + the mount event.
7200 +*/
7201 +
7202 +int
7203 +dm_get_config_tdp(
7204 + void __user *hanp,
7205 + size_t hlen,
7206 + dm_tokdata_t **tdpp)
7207 +{
7208 + dm_handle_t handle;
7209 + dm_tokevent_t *tevp;
7210 + int error;
7211 + unsigned long lc; /* lock cookie */
7212 +
7213 + if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
7214 + return(error);
7215 +
7216 + tevp = dm_init_tevp(0, 0);
7217 + lc = mutex_spinlock(&tevp->te_lock);
7218 +
7219 + /* Try to use the handle provided by the caller and assume DM_NO_TOKEN.
7220 + This will fail if the filesystem is in the process of being mounted.
7221 + */
7222 +
7223 + error = dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
7224 + DM_RIGHT_NULL, 0, tdpp);
7225 +
7226 + if (!error) {
7227 + return(0);
7228 + }
7229 +
7230 + /* Perhaps the filesystem is still mounting, in which case we need to
7231 + see if this is one of the handles in the DM_EVENT_MOUNT tevp.
7232 + */
7233 +
7234 + if ((tevp = dm_find_mount_tevp_and_lock(&handle.ha_fsid, &lc)) == NULL)
7235 + return(-EBADF);
7236 +
7237 + return(dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
7238 + DM_RIGHT_NULL, DM_FG_MUSTEXIST, tdpp));
7239 +}
7240 +
7241 +
7242 +/* dm_put_tdp() is called to release any right held on the inode, and to
7243 + VN_RELE() all references held on the inode. It is the caller's
7244 + responsibility to ensure that no other application threads are using the
7245 + tdp, and if necessary to unlink the tdp from the tevp before calling
7246 + this routine and to free the tdp afterwards.
7247 +*/
7248 +
7249 +static void
7250 +dm_put_tdp(
7251 + dm_tokdata_t *tdp)
7252 +{
7253 + ASSERT(tdp->td_app_ref <= 1);
7254 +
7255 + /* If the application thread is holding a right, or if the event
7256 + thread had a right but it has disappeared because of a dm_pending
7257 + or Cntl-C, then we need to release it here.
7258 + */
7259 +
7260 + if (tdp->td_right != DM_RIGHT_NULL) {
7261 + dm_fsys_vector_t *fsys_vector;
7262 +
7263 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7264 + (void)fsys_vector->release_right(tdp->td_ip, tdp->td_right,
7265 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
7266 + tdp->td_right = DM_RIGHT_NULL;
7267 + }
7268 +
7269 + /* Given that we wouldn't be here if there was still an event thread,
7270 + this VN_RELE loop has the potential of generating a DM_EVENT_DESTROY
7271 + event if some other thread has unlinked the file.
7272 + */
7273 +
7274 + while (tdp->td_vcount > 0) {
7275 + iput(tdp->td_ip);
7276 + tdp->td_vcount--;
7277 + }
7278 +
7279 + tdp->td_flags &= ~(DM_TDF_HOLD|DM_TDF_RIGHT);
7280 + tdp->td_ip = NULL;
7281 +}
7282 +
7283 +
7284 +/* Function dm_put_tevp() must ONLY be called from routines associated with
7285 + application threads, e.g. dm_read_invis, dm_get_events, etc. It must not be
7286 + called by a thread responsible for generating an event, such as
7287 + dm_send_data_event.
7288 +
7289 + PLEASE NOTE: It is possible for this routine to generate DM_EVENT_DESTROY
7290 + events, because its calls to dm_put_tdp drop inode references, and another
7291 + thread may have already unlinked a file whose inode we are de-referencing.
7292 + This sets the stage for various types of deadlock if the thread calling
7293 + dm_put_tevp is the same thread that calls dm_respond_event! In particular,
7294 + the dm_sent_destroy_event routine needs to obtain the dm_reg_lock,
7295 + dm_session_lock, and sn_qlock in order to queue the destroy event. No
7296 + caller of dm_put_tevp can hold any of these locks!
7297 +
7298 + Other possible deadlocks are that dm_send_destroy_event could block waiting
7299 + for a thread to register for the event using dm_set_disp() and/or
7300 + dm_set_return_on_destroy, or it could block because the session's sn_newq
7301 + is at the dm_max_queued_msgs event limit. The only safe solution
7302 + (unimplemented) is to have a separate kernel thread for each filesystem
7303 + whose only job is to do the inode-dereferencing. That way dm_respond_event
7304 + will not block, so the application can keep calling dm_get_events to read
7305 + events even if the filesystem thread should block. (If the filesystem
7306 + thread blocks, so will all subsequent destroy events for the same
7307 + filesystem.)
7308 +*/
7309 +
7310 +void
7311 +dm_put_tevp(
7312 + dm_tokevent_t *tevp,
7313 + dm_tokdata_t *tdp)
7314 +{
7315 + int free_tdp = 0;
7316 + unsigned long lc; /* lock cookie */
7317 +
7318 + lc = mutex_spinlock(&tevp->te_lock);
7319 +
7320 + if (tdp != NULL) {
7321 + if (tdp->td_vcount > 1 || (tdp->td_flags & DM_TDF_EVTREF)) {
7322 + ASSERT(tdp->td_app_ref > 0);
7323 +
7324 + iput(tdp->td_ip);
7325 + tdp->td_vcount--;
7326 + } else {
7327 + ASSERT(tdp->td_app_ref == 1);
7328 +
7329 + /* The inode reference count is either already at
7330 + zero (e.g. a failed dm_handle_to_ip() call in
7331 + dm_app_lookup_tdp()) or is going to zero. We can't
7332 + hold the lock while we decrement the count because
7333 + we could potentially end up being busy for a long
7334 + time in VOP_INACTIVATE. Use single-threading to
7335 + lock others out while we clean house.
7336 + */
7337 +
7338 + tdp->td_flags |= DM_TDF_STHREAD;
7339 +
7340 + /* WARNING - A destroy event is possible here if we are
7341 + giving up the last reference on an inode which has
7342 + been previously unlinked by some other thread!
7343 + */
7344 +
7345 + mutex_spinunlock(&tevp->te_lock, lc);
7346 + dm_put_tdp(tdp);
7347 + lc = mutex_spinlock(&tevp->te_lock);
7348 +
7349 + /* If this tdp is not one of the original tdps in the
7350 + event, then remove it from the tevp.
7351 + */
7352 +
7353 + if (!(tdp->td_flags & DM_TDF_ORIG)) {
7354 + dm_tokdata_t **tdpp = &tevp->te_tdp;
7355 +
7356 + while (*tdpp && *tdpp != tdp) {
7357 + tdpp = &(*tdpp)->td_next;
7358 + }
7359 + if (*tdpp == NULL) {
7360 + panic("dm_remove_tdp_from_tevp: tdp "
7361 + "%p not in tevp %p\n", tdp,
7362 + tevp);
7363 + }
7364 + *tdpp = tdp->td_next;
7365 + free_tdp++;
7366 + }
7367 + }
7368 +
7369 + /* If this is the last app thread actively using the tdp, clear
7370 + any single-threading and wake up any other app threads who
7371 + might be waiting to use this tdp, single-threaded or
7372 + otherwise.
7373 + */
7374 +
7375 + if (--tdp->td_app_ref == 0) {
7376 + if (tdp->td_flags & DM_TDF_STHREAD) {
7377 + tdp->td_flags &= ~DM_TDF_STHREAD;
7378 + if (tevp->te_app_slp)
7379 + sv_broadcast(&tevp->te_app_queue);
7380 + }
7381 + }
7382 +
7383 + if (free_tdp) {
7384 + kmem_cache_free(dm_tokdata_cachep, tdp);
7385 + }
7386 + }
7387 +
7388 + /* If other application threads are using this token/event, they will
7389 + do the cleanup.
7390 + */
7391 +
7392 + if (--tevp->te_app_ref > 0) {
7393 + mutex_spinunlock(&tevp->te_lock, lc);
7394 + return;
7395 + }
7396 +
7397 + /* If event generation threads are waiting for this thread to go away,
7398 + wake them up and let them do the cleanup.
7399 + */
7400 +
7401 + if (tevp->te_evt_ref > 0) {
7402 + sv_broadcast(&tevp->te_evt_queue);
7403 + mutex_spinunlock(&tevp->te_lock, lc);
7404 + return;
7405 + }
7406 +
7407 + /* This thread is the last active thread using the token/event. No
7408 + lock can be held while we disassemble the tevp because we could
7409 + potentially end up being busy for a long time in VOP_INACTIVATE.
7410 + */
7411 +
7412 + mutex_spinunlock(&tevp->te_lock, lc);
7413 +
7414 + /* WARNING - One or more destroy events are possible here if we are
7415 + giving up references on inodes which have been previously unlinked
7416 + by other kernel threads!
7417 + */
7418 +
7419 + while ((tdp = tevp->te_tdp) != NULL) {
7420 + tevp->te_tdp = tdp->td_next;
7421 + dm_put_tdp(tdp);
7422 + kmem_cache_free(dm_tokdata_cachep, tdp);
7423 + }
7424 + spinlock_destroy(&tevp->te_lock);
7425 + sv_destroy(&tevp->te_evt_queue);
7426 + sv_destroy(&tevp->te_app_queue);
7427 + kfree(tevp);
7428 +}
7429 +
7430 +
7431 +/* No caller of dm_app_put_tevp can hold either of the locks dm_reg_lock,
7432 + dm_session_lock, or any sn_qlock! (See dm_put_tevp for details.)
7433 +*/
7434 +
7435 +void
7436 +dm_app_put_tdp(
7437 + dm_tokdata_t *tdp)
7438 +{
7439 + dm_put_tevp(tdp->td_tevp, tdp);
7440 +}
7441 +
7442 +
7443 +/* dm_change_right is only called if the event thread is the one doing the
7444 + cleanup on a completed event. It looks at the current rights of a tdp
7445 + and compares that with the rights it had on the tdp when the event was
7446 + created. If different, it reaquires the original rights, then transfers
7447 + the rights back to being thread-based.
7448 +*/
7449 +
7450 +static void
7451 +dm_change_right(
7452 + dm_tokdata_t *tdp)
7453 +{
7454 +#ifdef HAVE_DMAPI_RIGHTS
7455 + dm_fsys_vector_t *fsys_vector;
7456 + int error;
7457 + u_int type;
7458 +#endif
7459 +
7460 + /* If the event doesn't have an inode reference, if the original right
7461 + was DM_RIGHT_NULL, or if the rights were never switched from being
7462 + thread-based to tdp-based, then there is nothing to do.
7463 + */
7464 +
7465 + if (!(tdp->td_flags & DM_TDF_EVTREF))
7466 + return;
7467 +
7468 + if (tdp->td_orig_right == DM_RIGHT_NULL)
7469 + return;
7470 +
7471 + /* DEBUG - Need a check here for event-based rights. */
7472 +
7473 +#ifdef HAVE_DMAPI_RIGHTS
7474 + /* The "rights" vectors are stubs now anyway. When they are
7475 + * implemented then bhv locking will have to be sorted out.
7476 + */
7477 +
7478 + /* If the current right is not the same as it was when the event was
7479 + created, first get back the original right.
7480 + */
7481 +
7482 + if (tdp->td_right != tdp->td_orig_right) {
7483 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7484 + type = (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0);
7485 +
7486 + switch (tdp->td_orig_right) {
7487 + case DM_RIGHT_SHARED:
7488 + if (tdp->td_right == DM_RIGHT_EXCL) {
7489 + error = fsys_vector->downgrade_right(
7490 + tdp->td_ip, tdp->td_right, type);
7491 + if (!error)
7492 + break;
7493 + (void)fsys_vector->release_right(tdp->td_ip,
7494 + tdp->td_right, type);
7495 + }
7496 + (void)fsys_vector->request_right(tdp->td_ip,
7497 + tdp->td_right, type, DM_RR_WAIT,
7498 + tdp->td_orig_right);
7499 + break;
7500 +
7501 + case DM_RIGHT_EXCL:
7502 + if (tdp->td_right == DM_RIGHT_SHARED) {
7503 + error = fsys_vector->upgrade_right(tdp->td_ip,
7504 + tdp->td_right, type);
7505 + if (!error)
7506 + break;
7507 + (void)fsys_vector->release_right(tdp->td_ip,
7508 + tdp->td_right, type);
7509 + }
7510 + (void)fsys_vector->request_right(tdp->td_ip,
7511 + tdp->td_right, type, DM_RR_WAIT,
7512 + tdp->td_orig_right);
7513 + break;
7514 + case DM_RIGHT_NULL:
7515 + break;
7516 + }
7517 + }
7518 +#endif
7519 +
7520 + /* We now have back the same level of rights as we had when the event
7521 + was generated. Now transfer the rights from being tdp-based back
7522 + to thread-based.
7523 + */
7524 +
7525 + /* DEBUG - Add a call here to transfer rights back to thread-based. */
7526 +
7527 + /* Finally, update the tdp so that we don't mess with the rights when
7528 + we eventually call dm_put_tdp.
7529 + */
7530 +
7531 + tdp->td_right = DM_RIGHT_NULL;
7532 +}
7533 +
7534 +
7535 +/* This routine is only called by event threads. The calls to dm_put_tdp
7536 + are not a deadlock risk here because this is an event thread, and it is
7537 + okay for such a thread to block on an induced destroy event. Okay, maybe
7538 + there is a slight risk; say that the event contains three inodes all of
7539 + which have DM_RIGHT_EXCL, and say that we are at the dm_max_queued_msgs
7540 + limit, and that the first inode is already unlinked. In that case the
7541 + destroy event will block waiting to be queued, and the application thread
7542 + could happen to reference one of the other locked inodes. Deadlock.
7543 +*/
7544 +
7545 +void
7546 +dm_evt_rele_tevp(
7547 + dm_tokevent_t *tevp,
7548 + int droprights) /* non-zero, evt thread loses rights */
7549 +{
7550 + dm_tokdata_t *tdp;
7551 + unsigned long lc; /* lock cookie */
7552 +
7553 + lc = mutex_spinlock(&tevp->te_lock);
7554 +
7555 + /* If we are here without DM_TEF_FINAL set and with at least one
7556 + application reference still remaining, then one of several
7557 + possibilities is true:
7558 + 1. This is an asynchronous event which has been queued but has not
7559 + yet been delivered, or which is in the process of being delivered.
7560 + 2. This is an unmount event (pseudo-asynchronous) yet to be
7561 + delivered or in the process of being delivered.
7562 + 3. This event had DM_FLAGS_NDELAY specified, and the application
7563 + has sent a dm_pending() reply for the event.
7564 + 4. This is a DM_EVENT_READ, DM_EVENT_WRITE, or DM_EVENT_TRUNCATE
7565 + event and the user typed a Cntl-C.
7566 + In all of these cases, the correct behavior is to leave the
7567 + responsibility of releasing any rights to the application threads
7568 + when they are done.
7569 + */
7570 +
7571 + if (tevp->te_app_ref > 0 && !(tevp->te_flags & DM_TEF_FINAL)) {
7572 + tevp->te_evt_ref--;
7573 + for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
7574 + if (tdp->td_flags & DM_TDF_EVTREF) {
7575 + tdp->td_flags &= ~DM_TDF_EVTREF;
7576 + if (tdp->td_vcount == 0) {
7577 + tdp->td_ip = NULL;
7578 + }
7579 + }
7580 + }
7581 + mutex_spinunlock(&tevp->te_lock, lc);
7582 + return; /* not the last thread */
7583 + }
7584 +
7585 + /* If the application reference count is non-zero here, that can only
7586 + mean that dm_respond_event() has been called, but the application
7587 + still has one or more threads in the kernel that haven't let go of
7588 + the tevp. In these cases, the event thread must wait until all
7589 + application threads have given up their references, and their
7590 + rights to handles within the event.
7591 + */
7592 +
7593 + while (tevp->te_app_ref) {
7594 + sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
7595 + lc = mutex_spinlock(&tevp->te_lock);
7596 + }
7597 +
7598 + /* This thread is the last active thread using the token/event. Reset
7599 + the rights of any inode that was part of the original event back
7600 + to their initial values before returning to the filesystem. The
7601 + exception is if the event failed (droprights is non-zero), in which
7602 + case we chose to return to the filesystem with all rights released.
7603 + Release the rights on any inode that was not part of the original
7604 + event. Give up all remaining application inode references
7605 + regardless of whether or not the inode was part of the original
7606 + event.
7607 + */
7608 +
7609 + mutex_spinunlock(&tevp->te_lock, lc);
7610 +
7611 + while ((tdp = tevp->te_tdp) != NULL) {
7612 + tevp->te_tdp = tdp->td_next;
7613 + if ((tdp->td_flags & DM_TDF_ORIG) &&
7614 + (tdp->td_flags & DM_TDF_EVTREF) &&
7615 + (!droprights)) {
7616 + dm_change_right(tdp);
7617 + }
7618 + dm_put_tdp(tdp);
7619 + kmem_cache_free(dm_tokdata_cachep, tdp);
7620 + }
7621 + spinlock_destroy(&tevp->te_lock);
7622 + sv_destroy(&tevp->te_evt_queue);
7623 + sv_destroy(&tevp->te_app_queue);
7624 + kfree(tevp);
7625 +}
7626 +
7627 +
7628 +/* dm_obj_ref_hold() is just a fancy way to get an inode reference on an object
7629 + to hold it in kernel memory.
7630 +*/
7631 +
7632 +int
7633 +dm_obj_ref_hold(
7634 + dm_sessid_t sid,
7635 + dm_token_t token,
7636 + void __user *hanp,
7637 + size_t hlen)
7638 +{
7639 + dm_fsys_vector_t *fsys_vector;
7640 + dm_tokdata_t *tdp;
7641 + int error;
7642 +
7643 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
7644 + DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
7645 +
7646 + /* The tdp is single-threaded, so no mutex lock needed for update. */
7647 +
7648 + if (error == 0) {
7649 + if (tdp->td_flags & DM_TDF_HOLD) { /* if already held */
7650 + error = -EBUSY;
7651 + } else {
7652 + tdp->td_flags |= DM_TDF_HOLD;
7653 + tdp->td_vcount++;
7654 +
7655 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7656 + (void)fsys_vector->obj_ref_hold(tdp->td_ip);
7657 + }
7658 + dm_app_put_tdp(tdp);
7659 + }
7660 + return(error);
7661 +}
7662 +
7663 +
7664 +int
7665 +dm_obj_ref_rele(
7666 + dm_sessid_t sid,
7667 + dm_token_t token,
7668 + void __user *hanp,
7669 + size_t hlen)
7670 +{
7671 + dm_tokdata_t *tdp;
7672 + int error;
7673 +
7674 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
7675 + DM_RIGHT_NULL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
7676 +
7677 + /* The tdp is single-threaded, so no mutex lock needed for update. */
7678 +
7679 + if (error == 0) {
7680 + if (!(tdp->td_flags & DM_TDF_HOLD)) { /* if not held */
7681 + error = -EACCES; /* use the DM_FG_MUSTEXIST errno */
7682 + } else {
7683 + tdp->td_flags &= ~DM_TDF_HOLD;
7684 + iput(tdp->td_ip);
7685 + tdp->td_vcount--;
7686 + }
7687 + dm_app_put_tdp(tdp);
7688 + }
7689 + return(error);
7690 +}
7691 +
7692 +
7693 +int
7694 +dm_obj_ref_query_rvp(
7695 + dm_sessid_t sid,
7696 + dm_token_t token,
7697 + void __user *hanp,
7698 + size_t hlen,
7699 + int *rvp)
7700 +{
7701 + dm_tokdata_t *tdp;
7702 + int error;
7703 +
7704 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_VNO,
7705 + DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
7706 + if (error != 0)
7707 + return(error);
7708 +
7709 + /* If the request is valid but the handle just isn't present in the
7710 + event or the hold flag isn't set, return zero, else return one.
7711 + */
7712 +
7713 + if (tdp) {
7714 + if (tdp->td_flags & DM_TDF_HOLD) { /* if held */
7715 + *rvp = 1;
7716 + } else {
7717 + *rvp = 0;
7718 + }
7719 + dm_app_put_tdp(tdp);
7720 + } else {
7721 + *rvp = 0;
7722 + }
7723 + return(0);
7724 +}
7725 +
7726 +
7727 +int
7728 +dm_downgrade_right(
7729 + dm_sessid_t sid,
7730 + void __user *hanp,
7731 + size_t hlen,
7732 + dm_token_t token)
7733 +{
7734 + dm_fsys_vector_t *fsys_vector;
7735 + dm_tokdata_t *tdp;
7736 + int error;
7737 +
7738 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
7739 + DM_RIGHT_EXCL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
7740 + if (error != 0)
7741 + return(error);
7742 +
7743 + /* Attempt the downgrade. Filesystems which support rights but not
7744 + the downgrading of rights will return ENOSYS.
7745 + */
7746 +
7747 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7748 + error = fsys_vector->downgrade_right(tdp->td_ip, tdp->td_right,
7749 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
7750 +
7751 + /* The tdp is single-threaded, so no mutex lock needed for update. */
7752 +
7753 + if (error == 0)
7754 + tdp->td_right = DM_RIGHT_SHARED;
7755 +
7756 + dm_app_put_tdp(tdp);
7757 + return(error);
7758 +}
7759 +
7760 +
7761 +int
7762 +dm_query_right(
7763 + dm_sessid_t sid,
7764 + void __user *hanp,
7765 + size_t hlen,
7766 + dm_token_t token,
7767 + dm_right_t __user *rightp)
7768 +{
7769 + dm_tokdata_t *tdp;
7770 + dm_right_t right;
7771 + int error;
7772 +
7773 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
7774 + DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
7775 + if (error != 0)
7776 + return(error);
7777 +
7778 + /* Get the current right and copy it to the caller. The tdp is
7779 + single-threaded, so no mutex lock is needed. If the tdp is not in
7780 + the event we are supposed to return DM_RIGHT_NULL in order to be
7781 + compatible with Veritas.
7782 + */
7783 +
7784 + if (tdp) {
7785 + right = tdp->td_right;
7786 + dm_app_put_tdp(tdp);
7787 + } else {
7788 + right = DM_RIGHT_NULL;
7789 + }
7790 + if (copy_to_user(rightp, &right, sizeof(right)))
7791 + return(-EFAULT);
7792 + return(0);
7793 +}
7794 +
7795 +
7796 +int
7797 +dm_release_right(
7798 + dm_sessid_t sid,
7799 + void __user *hanp,
7800 + size_t hlen,
7801 + dm_token_t token)
7802 +{
7803 + dm_fsys_vector_t *fsys_vector;
7804 + dm_tokdata_t *tdp;
7805 + int error;
7806 +
7807 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
7808 + DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
7809 + if (error != 0)
7810 + return(error);
7811 +
7812 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7813 + error = fsys_vector->release_right(tdp->td_ip, tdp->td_right,
7814 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
7815 +
7816 + /* The tdp is single-threaded, so no mutex lock needed for update. */
7817 +
7818 + if (error == 0) {
7819 + tdp->td_right = DM_RIGHT_NULL;
7820 + if (tdp->td_flags & DM_TDF_RIGHT) {
7821 + tdp->td_flags &= ~DM_TDF_RIGHT;
7822 + iput(tdp->td_ip);
7823 + tdp->td_vcount--;
7824 + }
7825 + }
7826 +
7827 + dm_app_put_tdp(tdp);
7828 + return(error);
7829 +}
7830 +
7831 +
7832 +int
7833 +dm_request_right(
7834 + dm_sessid_t sid,
7835 + void __user *hanp,
7836 + size_t hlen,
7837 + dm_token_t token,
7838 + u_int flags,
7839 + dm_right_t right)
7840 +{
7841 + dm_fsys_vector_t *fsys_vector;
7842 + dm_tokdata_t *tdp;
7843 + int error;
7844 +
7845 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
7846 + DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
7847 + if (error != 0)
7848 + return(error);
7849 +
7850 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7851 + error = fsys_vector->request_right(tdp->td_ip, tdp->td_right,
7852 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0), flags, right);
7853 +
7854 + /* The tdp is single-threaded, so no mutex lock is needed for update.
7855 +
7856 + If this is the first dm_request_right call for this inode, then we
7857 + need to bump the inode reference count for two reasons. First of
7858 + all, it is supposed to be impossible for the file to disappear or
7859 + for the filesystem to be unmounted while a right is held on a file;
7860 + bumping the file's inode reference count ensures this. Second, if
7861 + rights are ever actually implemented, it will most likely be done
7862 + without changes to the on-disk inode, which means that we can't let
7863 + the inode become unreferenced while a right on it is held.
7864 + */
7865 +
7866 + if (error == 0) {
7867 + if (!(tdp->td_flags & DM_TDF_RIGHT)) { /* if first call */
7868 + tdp->td_flags |= DM_TDF_RIGHT;
7869 + tdp->td_vcount++;
7870 + (void)fsys_vector->obj_ref_hold(tdp->td_ip);
7871 + }
7872 + tdp->td_right = right;
7873 + }
7874 +
7875 + dm_app_put_tdp(tdp);
7876 + return(error);
7877 +}
7878 +
7879 +
7880 +int
7881 +dm_upgrade_right(
7882 + dm_sessid_t sid,
7883 + void __user *hanp,
7884 + size_t hlen,
7885 + dm_token_t token)
7886 +{
7887 + dm_fsys_vector_t *fsys_vector;
7888 + dm_tokdata_t *tdp;
7889 + int error;
7890 +
7891 + error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
7892 + DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
7893 + if (error != 0)
7894 + return(error);
7895 +
7896 + /* If the object already has the DM_RIGHT_EXCL right, no need to
7897 + attempt an upgrade.
7898 + */
7899 +
7900 + if (tdp->td_right == DM_RIGHT_EXCL) {
7901 + dm_app_put_tdp(tdp);
7902 + return(0);
7903 + }
7904 +
7905 + /* Attempt the upgrade. Filesystems which support rights but not
7906 + the upgrading of rights will return ENOSYS.
7907 + */
7908 +
7909 + fsys_vector = dm_fsys_vector(tdp->td_ip);
7910 + error = fsys_vector->upgrade_right(tdp->td_ip, tdp->td_right,
7911 + (tdp->td_type == DM_TDT_VFS ? DM_FSYS_OBJ : 0));
7912 +
7913 + /* The tdp is single-threaded, so no mutex lock needed for update. */
7914 +
7915 + if (error == 0)
7916 + tdp->td_right = DM_RIGHT_EXCL;
7917 +
7918 + dm_app_put_tdp(tdp);
7919 + return(error);
7920 +}
7921 Index: linux-2.6.26/fs/dmapi/dmapi_session.c
7922 ===================================================================
7923 --- /dev/null
7924 +++ linux-2.6.26/fs/dmapi/dmapi_session.c
7925 @@ -0,0 +1,1825 @@
7926 +/*
7927 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7928 + *
7929 + * This program is free software; you can redistribute it and/or modify it
7930 + * under the terms of version 2 of the GNU General Public License as
7931 + * published by the Free Software Foundation.
7932 + *
7933 + * This program is distributed in the hope that it would be useful, but
7934 + * WITHOUT ANY WARRANTY; without even the implied warranty of
7935 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
7936 + *
7937 + * Further, this software is distributed without any warranty that it is
7938 + * free of the rightful claim of any third person regarding infringement
7939 + * or the like. Any license provided herein, whether implied or
7940 + * otherwise, applies only to this software file. Patent licenses, if
7941 + * any, provided herein do not apply to combinations of this program with
7942 + * other software, or any other product whatsoever.
7943 + *
7944 + * You should have received a copy of the GNU General Public License along
7945 + * with this program; if not, write the Free Software Foundation, Inc., 59
7946 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
7947 + *
7948 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
7949 + * Mountain View, CA 94043, or:
7950 + *
7951 + * http://www.sgi.com
7952 + *
7953 + * For further information regarding this notice, see:
7954 + *
7955 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
7956 + */
7957 +
7958 +#include <linux/init.h>
7959 +#include <linux/proc_fs.h>
7960 +#include <asm/uaccess.h>
7961 +#ifdef CONFIG_PROC_FS
7962 +#include <linux/module.h>
7963 +#endif
7964 +#include "dmapi.h"
7965 +#include "dmapi_kern.h"
7966 +#include "dmapi_private.h"
7967 +
7968 +dm_session_t *dm_sessions = NULL; /* head of session list */
7969 +u_int dm_sessions_active = 0; /* # sessions currently active */
7970 +dm_sessid_t dm_next_sessid = 1; /* next session ID to use */
7971 +lock_t dm_session_lock = SPIN_LOCK_UNLOCKED;/* lock for session list */
7972 +
7973 +dm_token_t dm_next_token = 1; /* next token ID to use */
7974 +dm_sequence_t dm_next_sequence = 1; /* next sequence number to use */
7975 +lock_t dm_token_lock = SPIN_LOCK_UNLOCKED;/* dm_next_token/dm_next_sequence lock */
7976 +
7977 +int dm_max_queued_msgs = 2048; /* max # undelivered msgs/session */
7978 +
7979 +int dm_hash_buckets = 1009; /* prime -- number of buckets */
7980 +
7981 +#define DM_SHASH(sess,inodenum) \
7982 + ((sess)->sn_sesshash + do_mod((inodenum), dm_hash_buckets))
7983 +
7984 +
7985 +#ifdef CONFIG_PROC_FS
7986 +static int
7987 +sessions_read_pfs(char *buffer, char **start, off_t offset,
7988 + int count, int *eof, void *data)
7989 +{
7990 + int len;
7991 + dm_session_t *sessp = (dm_session_t*)data;
7992 +
7993 +#define CHKFULL if(len >= count) break;
7994 +#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
7995 +
7996 + len=0;
7997 + while(1){
7998 + ADDBUF("sessp=0x%p\n", sessp);
7999 + ADDBUF("sn_next=0x%p\n", sessp->sn_next);
8000 + ADDBUF("sn_sessid=%d\n", sessp->sn_sessid);
8001 + ADDBUF("sn_flags=%x\n", sessp->sn_flags);
8002 + ADDBUF("sn_qlock=%c\n", '?');
8003 + ADDBUF("sn_readerq=%c\n", '?');
8004 + ADDBUF("sn_writerq=%c\n", '?');
8005 + ADDBUF("sn_readercnt=%u\n", sessp->sn_readercnt);
8006 + ADDBUF("sn_writercnt=%u\n", sessp->sn_writercnt);
8007 +
8008 + ADDBUF("sn_newq.eq_head=0x%p\n", sessp->sn_newq.eq_head);
8009 + ADDBUF("sn_newq.eq_tail=0x%p\n", sessp->sn_newq.eq_tail);
8010 + ADDBUF("sn_newq.eq_count=%d\n", sessp->sn_newq.eq_count);
8011 +
8012 + ADDBUF("sn_delq.eq_head=0x%p\n", sessp->sn_delq.eq_head);
8013 + ADDBUF("sn_delq.eq_tail=0x%p\n", sessp->sn_delq.eq_tail);
8014 + ADDBUF("sn_delq.eq_count=%d\n", sessp->sn_delq.eq_count);
8015 +
8016 + ADDBUF("sn_evt_writerq.eq_head=0x%p\n", sessp->sn_evt_writerq.eq_head);
8017 + ADDBUF("sn_evt_writerq.eq_tail=0x%p\n", sessp->sn_evt_writerq.eq_tail);
8018 + ADDBUF("sn_evt_writerq.eq_count=%d\n", sessp->sn_evt_writerq.eq_count);
8019 +
8020 + ADDBUF("sn_info=\"%s\"\n", sessp->sn_info);
8021 +
8022 + break;
8023 + }
8024 +
8025 + if (offset >= len) {
8026 + *start = buffer;
8027 + *eof = 1;
8028 + return 0;
8029 + }
8030 + *start = buffer + offset;
8031 + if ((len -= offset) > count)
8032 + return count;
8033 + *eof = 1;
8034 +
8035 + return len;
8036 +}
8037 +#endif
8038 +
8039 +
8040 +/* Link a session to the end of the session list. New sessions are always
8041 + added at the end of the list so that dm_enqueue_mount_event() doesn't
8042 + miss a session. The caller must have obtained dm_session_lock before
8043 + calling this routine.
8044 +*/
8045 +
8046 +static void
8047 +link_session(
8048 + dm_session_t *s)
8049 +{
8050 + dm_session_t *tmp;
8051 +
8052 + if ((tmp = dm_sessions) == NULL) {
8053 + dm_sessions = s;
8054 + } else {
8055 + while (tmp->sn_next != NULL)
8056 + tmp = tmp->sn_next;
8057 + tmp->sn_next = s;
8058 + }
8059 + s->sn_next = NULL;
8060 + dm_sessions_active++;
8061 +}
8062 +
8063 +
8064 +/* Remove a session from the session list. The caller must have obtained
8065 + dm_session_lock before calling this routine. unlink_session() should only
8066 + be used in situations where the session is known to be on the dm_sessions
8067 + list; otherwise it panics.
8068 +*/
8069 +
8070 +static void
8071 +unlink_session(
8072 + dm_session_t *s)
8073 +{
8074 + dm_session_t *tmp;
8075 +
8076 + if (dm_sessions == s) {
8077 + dm_sessions = dm_sessions->sn_next;
8078 + } else {
8079 + for (tmp = dm_sessions; tmp; tmp = tmp->sn_next) {
8080 + if (tmp->sn_next == s)
8081 + break;
8082 + }
8083 + if (tmp == NULL) {
8084 + panic("unlink_session: corrupt DMAPI session list, "
8085 + "dm_sessions %p, session %p\n",
8086 + dm_sessions, s);
8087 + }
8088 + tmp->sn_next = s->sn_next;
8089 + }
8090 + s->sn_next = NULL;
8091 + dm_sessions_active--;
8092 +}
8093 +
8094 +
8095 +/* Link an event to the end of an event queue. The caller must have obtained
8096 + the session's sn_qlock before calling this routine.
8097 +*/
8098 +
8099 +void
8100 +dm_link_event(
8101 + dm_tokevent_t *tevp,
8102 + dm_eventq_t *queue)
8103 +{
8104 + if (queue->eq_tail) {
8105 + queue->eq_tail->te_next = tevp;
8106 + queue->eq_tail = tevp;
8107 + } else {
8108 + queue->eq_head = queue->eq_tail = tevp;
8109 + }
8110 + tevp->te_next = NULL;
8111 + queue->eq_count++;
8112 +}
8113 +
8114 +
8115 +/* Remove an event from an event queue. The caller must have obtained the
8116 + session's sn_qlock before calling this routine. dm_unlink_event() should
8117 + only be used in situations where the event is known to be on the queue;
8118 + otherwise it panics.
8119 +*/
8120 +
8121 +void
8122 +dm_unlink_event(
8123 + dm_tokevent_t *tevp,
8124 + dm_eventq_t *queue)
8125 +{
8126 + dm_tokevent_t *tmp;
8127 +
8128 + if (queue->eq_head == tevp) {
8129 + queue->eq_head = tevp->te_next;
8130 + if (queue->eq_head == NULL)
8131 + queue->eq_tail = NULL;
8132 + } else {
8133 + tmp = queue->eq_head;
8134 + while (tmp && tmp->te_next != tevp)
8135 + tmp = tmp->te_next;
8136 + if (tmp == NULL) {
8137 + panic("dm_unlink_event: corrupt DMAPI queue %p, "
8138 + "tevp %p\n", queue, tevp);
8139 + }
8140 + tmp->te_next = tevp->te_next;
8141 + if (tmp->te_next == NULL)
8142 + queue->eq_tail = tmp;
8143 + }
8144 + tevp->te_next = NULL;
8145 + queue->eq_count--;
8146 +}
8147 +
8148 +/* Link a regular file event to a hash bucket. The caller must have obtained
8149 + the session's sn_qlock before calling this routine.
8150 + The tokevent must be for a regular file object--DM_TDT_REG.
8151 +*/
8152 +
8153 +static void
8154 +hash_event(
8155 + dm_session_t *s,
8156 + dm_tokevent_t *tevp)
8157 +{
8158 + dm_sesshash_t *sh;
8159 + dm_ino_t ino;
8160 +
8161 + if (s->sn_sesshash == NULL) {
8162 + s->sn_sesshash = kmalloc(dm_hash_buckets * sizeof(dm_sesshash_t), GFP_KERNEL);
8163 + if (s->sn_sesshash == NULL) {
8164 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
8165 + return;
8166 + }
8167 + memset(s->sn_sesshash, 0, dm_hash_buckets * sizeof(dm_sesshash_t));
8168 + }
8169 +
8170 + ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
8171 + sh = DM_SHASH(s, ino);
8172 +
8173 +#ifdef DM_SHASH_DEBUG
8174 + if (sh->h_next == NULL) {
8175 + s->sn_buckets_in_use++;
8176 + if (s->sn_buckets_in_use > s->sn_max_buckets_in_use)
8177 + s->sn_max_buckets_in_use++;
8178 + }
8179 + sh->maxlength++;
8180 + sh->curlength++;
8181 + sh->num_adds++;
8182 +#endif
8183 +
8184 + tevp->te_flags |= DM_TEF_HASHED;
8185 + tevp->te_hashnext = sh->h_next;
8186 + sh->h_next = tevp;
8187 +}
8188 +
8189 +
8190 +/* Remove a regular file event from a hash bucket. The caller must have
8191 + obtained the session's sn_qlock before calling this routine.
8192 + The tokevent must be for a regular file object--DM_TDT_REG.
8193 +*/
8194 +
8195 +static void
8196 +unhash_event(
8197 + dm_session_t *s,
8198 + dm_tokevent_t *tevp)
8199 +{
8200 + dm_sesshash_t *sh;
8201 + dm_tokevent_t *tmp;
8202 + dm_ino_t ino;
8203 +
8204 + if (s->sn_sesshash == NULL)
8205 + return;
8206 +
8207 + ino = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
8208 + sh = DM_SHASH(s, ino);
8209 +
8210 + if (sh->h_next == tevp) {
8211 + sh->h_next = tevp->te_hashnext; /* leap frog */
8212 + } else {
8213 + tmp = sh->h_next;
8214 + while (tmp->te_hashnext != tevp) {
8215 + tmp = tmp->te_hashnext;
8216 + }
8217 + tmp->te_hashnext = tevp->te_hashnext; /* leap frog */
8218 + }
8219 + tevp->te_hashnext = NULL;
8220 + tevp->te_flags &= ~DM_TEF_HASHED;
8221 +
8222 +#ifdef DM_SHASH_DEBUG
8223 + if (sh->h_next == NULL)
8224 + s->sn_buckets_in_use--;
8225 + sh->curlength--;
8226 + sh->num_dels++;
8227 +#endif
8228 +}
8229 +
8230 +
8231 +/* Determine if this is a repeat event. The caller MUST be holding
8232 + the session lock.
8233 + The tokevent must be for a regular file object--DM_TDT_REG.
8234 + Returns:
8235 + 0 == match not found
8236 + 1 == match found
8237 +*/
8238 +
8239 +static int
8240 +repeated_event(
8241 + dm_session_t *s,
8242 + dm_tokevent_t *tevp)
8243 +{
8244 + dm_sesshash_t *sh;
8245 + dm_data_event_t *d_event1;
8246 + dm_data_event_t *d_event2;
8247 + dm_tokevent_t *tevph;
8248 + dm_ino_t ino1;
8249 + dm_ino_t ino2;
8250 +
8251 + if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail)) {
8252 + return(0);
8253 + }
8254 + if (s->sn_sesshash == NULL) {
8255 + return(0);
8256 + }
8257 +
8258 + ino1 = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
8259 + sh = DM_SHASH(s, ino1);
8260 +
8261 + if (sh->h_next == NULL) {
8262 + /* bucket is empty, no match here */
8263 + return(0);
8264 + }
8265 +
8266 + d_event1 = (dm_data_event_t *)((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
8267 + tevph = sh->h_next;
8268 + while (tevph) {
8269 + /* find something with the same event type and handle type */
8270 + if ((tevph->te_msg.ev_type == tevp->te_msg.ev_type) &&
8271 + (tevph->te_tdp->td_type == tevp->te_tdp->td_type)) {
8272 +
8273 + ino2 = (&tevp->te_tdp->td_handle.ha_fid)->dm_fid_ino;
8274 + d_event2 = (dm_data_event_t *)((char *)&tevph->te_msg + tevph->te_msg.ev_data.vd_offset);
8275 +
8276 + /* If the two events are operating on the same file,
8277 + and the same part of that file, then we have a
8278 + match.
8279 + */
8280 + if ((ino1 == ino2) &&
8281 + (d_event2->de_offset == d_event1->de_offset) &&
8282 + (d_event2->de_length == d_event1->de_length)) {
8283 + /* found a match */
8284 +#ifdef DM_SHASH_DEBUG
8285 + sh->dup_hits++;
8286 +#endif
8287 + return(1);
8288 + }
8289 + }
8290 + tevph = tevph->te_hashnext;
8291 + }
8292 +
8293 + /* No match found */
8294 + return(0);
8295 +}
8296 +
8297 +
8298 +/* Return a pointer to a session given its session ID, or EINVAL if no session
8299 + has the session ID (per the DMAPI spec). The caller must have obtained
8300 + dm_session_lock before calling this routine.
8301 +*/
8302 +
8303 +static int
8304 +dm_find_session(
8305 + dm_sessid_t sid,
8306 + dm_session_t **sessionpp)
8307 +{
8308 + dm_session_t *s;
8309 +
8310 + for (s = dm_sessions; s; s = s->sn_next) {
8311 + if (s->sn_sessid == sid) {
8312 + *sessionpp = s;
8313 + return(0);
8314 + }
8315 + }
8316 + return(-EINVAL);
8317 +}
8318 +
8319 +
8320 +/* Return a pointer to a locked session given its session ID. '*lcp' is
8321 + used to obtain the session's sn_qlock. Caller is responsible for eventually
8322 + unlocking it.
8323 +*/
8324 +
8325 +int
8326 +dm_find_session_and_lock(
8327 + dm_sessid_t sid,
8328 + dm_session_t **sessionpp,
8329 + unsigned long *lcp) /* addr of returned lock cookie */
8330 +{
8331 + int error;
8332 +
8333 + for (;;) {
8334 + *lcp = mutex_spinlock(&dm_session_lock);
8335 +
8336 + if ((error = dm_find_session(sid, sessionpp)) != 0) {
8337 + mutex_spinunlock(&dm_session_lock, *lcp);
8338 + return(error);
8339 + }
8340 + if (spin_trylock(&(*sessionpp)->sn_qlock)) {
8341 + nested_spinunlock(&dm_session_lock);
8342 + return(0); /* success */
8343 + }
8344 +
8345 + /* If the second lock is not available, drop the first and
8346 + start over. This gives the CPU a chance to process any
8347 + interrupts, and also allows processes which want a sn_qlock
8348 + for a different session to proceed.
8349 + */
8350 +
8351 + mutex_spinunlock(&dm_session_lock, *lcp);
8352 + }
8353 +}
8354 +
8355 +
8356 +/* Return a pointer to the event on the specified session's sn_delq which
8357 + contains the given token. The caller must have obtained the session's
8358 + sn_qlock before calling this routine.
8359 +*/
8360 +
8361 +static int
8362 +dm_find_msg(
8363 + dm_session_t *s,
8364 + dm_token_t token,
8365 + dm_tokevent_t **tevpp)
8366 +{
8367 + dm_tokevent_t *tevp;
8368 +
8369 + if (token <= DM_INVALID_TOKEN)
8370 + return(-EINVAL);
8371 +
8372 + for (tevp = s->sn_delq.eq_head; tevp; tevp = tevp->te_next) {
8373 + if (tevp->te_msg.ev_token == token) {
8374 + *tevpp = tevp;
8375 + return(0);
8376 + }
8377 + }
8378 + return(-ESRCH);
8379 +}
8380 +
8381 +
8382 +/* Given a session ID and token, find the tevp on the specified session's
8383 + sn_delq which corresponds to that session ID/token pair. If a match is
8384 + found, lock the tevp's te_lock and return a pointer to the tevp.
8385 + '*lcp' is used to obtain the tevp's te_lock. The caller is responsible
8386 + for eventually unlocking it.
8387 +*/
8388 +
8389 +int
8390 +dm_find_msg_and_lock(
8391 + dm_sessid_t sid,
8392 + dm_token_t token,
8393 + dm_tokevent_t **tevpp,
8394 + unsigned long *lcp) /* address of returned lock cookie */
8395 +{
8396 + dm_session_t *s;
8397 + int error;
8398 +
8399 + if ((error = dm_find_session_and_lock(sid, &s, lcp)) != 0)
8400 + return(error);
8401 +
8402 + if ((error = dm_find_msg(s, token, tevpp)) != 0) {
8403 + mutex_spinunlock(&s->sn_qlock, *lcp);
8404 + return(error);
8405 + }
8406 + nested_spinlock(&(*tevpp)->te_lock);
8407 + nested_spinunlock(&s->sn_qlock);
8408 + return(0);
8409 +}
8410 +
8411 +
8412 +/* Create a new session, or resume an old session if one is given. */
8413 +
8414 +int
8415 +dm_create_session(
8416 + dm_sessid_t old,
8417 + char __user *info,
8418 + dm_sessid_t __user *new)
8419 +{
8420 + dm_session_t *s;
8421 + dm_sessid_t sid;
8422 + char sessinfo[DM_SESSION_INFO_LEN];
8423 + size_t len;
8424 + int error;
8425 + unsigned long lc; /* lock cookie */
8426 +
8427 + len = strnlen_user(info, DM_SESSION_INFO_LEN-1);
8428 + if (copy_from_user(sessinfo, info, len))
8429 + return(-EFAULT);
8430 + lc = mutex_spinlock(&dm_session_lock);
8431 + sid = dm_next_sessid++;
8432 + mutex_spinunlock(&dm_session_lock, lc);
8433 + if (copy_to_user(new, &sid, sizeof(sid)))
8434 + return(-EFAULT);
8435 +
8436 + if (old == DM_NO_SESSION) {
8437 + s = kmem_cache_alloc(dm_session_cachep, GFP_KERNEL);
8438 + if (s == NULL) {
8439 + printk("%s/%d: kmem_cache_alloc(dm_session_cachep) returned NULL\n", __FUNCTION__, __LINE__);
8440 + return -ENOMEM;
8441 + }
8442 + memset(s, 0, sizeof(*s));
8443 +
8444 + sv_init(&s->sn_readerq, SV_DEFAULT, "dmreadq");
8445 + sv_init(&s->sn_writerq, SV_DEFAULT, "dmwritq");
8446 + spinlock_init(&s->sn_qlock, "sn_qlock");
8447 + } else {
8448 + lc = mutex_spinlock(&dm_session_lock);
8449 + if ((error = dm_find_session(old, &s)) != 0) {
8450 + mutex_spinunlock(&dm_session_lock, lc);
8451 + return(error);
8452 + }
8453 + unlink_session(s);
8454 + mutex_spinunlock(&dm_session_lock, lc);
8455 +#ifdef CONFIG_PROC_FS
8456 + {
8457 + char buf[100];
8458 + sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
8459 + remove_proc_entry(buf, NULL);
8460 + }
8461 +#endif
8462 + }
8463 + memcpy(s->sn_info, sessinfo, len);
8464 + s->sn_info[len-1] = 0; /* if not NULL, then now 'tis */
8465 + s->sn_sessid = sid;
8466 + lc = mutex_spinlock(&dm_session_lock);
8467 + link_session(s);
8468 + mutex_spinunlock(&dm_session_lock, lc);
8469 +#ifdef CONFIG_PROC_FS
8470 + {
8471 + char buf[100];
8472 + struct proc_dir_entry *entry;
8473 +
8474 + sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
8475 + entry = create_proc_read_entry(buf, 0, NULL, sessions_read_pfs, s);
8476 + entry->owner = THIS_MODULE;
8477 + }
8478 +#endif
8479 + return(0);
8480 +}
8481 +
8482 +
8483 +int
8484 +dm_destroy_session(
8485 + dm_sessid_t sid)
8486 +{
8487 + dm_session_t *s;
8488 + int error;
8489 + unsigned long lc; /* lock cookie */
8490 +
8491 + /* The dm_session_lock must be held until the session is unlinked. */
8492 +
8493 + lc = mutex_spinlock(&dm_session_lock);
8494 +
8495 + if ((error = dm_find_session(sid, &s)) != 0) {
8496 + mutex_spinunlock(&dm_session_lock, lc);
8497 + return(error);
8498 + }
8499 + nested_spinlock(&s->sn_qlock);
8500 +
8501 + /* The session exists. Check to see if it is still in use. If any
8502 + messages still exist on the sn_newq or sn_delq, or if any processes
8503 + are waiting for messages to arrive on the session, then the session
8504 + must not be destroyed.
8505 + */
8506 +
8507 + if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
8508 + nested_spinunlock(&s->sn_qlock);
8509 + mutex_spinunlock(&dm_session_lock, lc);
8510 + return(-EBUSY);
8511 + }
8512 +
8513 + /* The session is not in use. Dequeue it from the session chain. */
8514 +
8515 + unlink_session(s);
8516 + nested_spinunlock(&s->sn_qlock);
8517 + mutex_spinunlock(&dm_session_lock, lc);
8518 +
8519 +#ifdef CONFIG_PROC_FS
8520 + {
8521 + char buf[100];
8522 + sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
8523 + remove_proc_entry(buf, NULL);
8524 + }
8525 +#endif
8526 +
8527 + /* Now clear the sessions's disposition registration, and then destroy
8528 + the session structure.
8529 + */
8530 +
8531 + dm_clear_fsreg(s);
8532 +
8533 + spinlock_destroy(&s->sn_qlock);
8534 + sv_destroy(&s->sn_readerq);
8535 + sv_destroy(&s->sn_writerq);
8536 + if (s->sn_sesshash)
8537 + kfree(s->sn_sesshash);
8538 + kmem_cache_free(dm_session_cachep, s);
8539 + return(0);
8540 +}
8541 +
8542 +
8543 +/*
8544 + * Return a list of all active sessions.
8545 + */
8546 +
8547 +int
8548 +dm_getall_sessions(
8549 + u_int nelem,
8550 + dm_sessid_t __user *sidp,
8551 + u_int __user *nelemp)
8552 +{
8553 + dm_session_t *s;
8554 + u_int sesscnt;
8555 + dm_sessid_t *sesslist;
8556 + unsigned long lc; /* lock cookie */
8557 + int error;
8558 + int i;
8559 +
8560 + /* Loop until we can get the right amount of temp space, being careful
8561 + not to hold a mutex during the allocation. Usually only one trip.
8562 + */
8563 +
8564 + for (;;) {
8565 + if ((sesscnt = dm_sessions_active) == 0) {
8566 + /*if (suword(nelemp, 0))*/
8567 + if (put_user(0, nelemp))
8568 + return(-EFAULT);
8569 + return(0);
8570 + }
8571 + sesslist = kmalloc(sesscnt * sizeof(*sidp), GFP_KERNEL);
8572 + if (sesslist == NULL) {
8573 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
8574 + return -ENOMEM;
8575 + }
8576 +
8577 + lc = mutex_spinlock(&dm_session_lock);
8578 + if (sesscnt == dm_sessions_active)
8579 + break;
8580 +
8581 + mutex_spinunlock(&dm_session_lock, lc);
8582 + kfree(sesslist);
8583 + }
8584 +
8585 + /* Make a temp copy of the data, then release the mutex. */
8586 +
8587 + for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next)
8588 + sesslist[i] = s->sn_sessid;
8589 +
8590 + mutex_spinunlock(&dm_session_lock, lc);
8591 +
8592 + /* Now copy the data to the user. */
8593 +
8594 + if(put_user(sesscnt, nelemp)) {
8595 + error = -EFAULT;
8596 + } else if (sesscnt > nelem) {
8597 + error = -E2BIG;
8598 + } else if (copy_to_user(sidp, sesslist, sesscnt * sizeof(*sidp))) {
8599 + error = -EFAULT;
8600 + } else {
8601 + error = 0;
8602 + }
8603 + kfree(sesslist);
8604 + return(error);
8605 +}
8606 +
8607 +
8608 +/*
8609 + * Return the descriptive string associated with a session.
8610 + */
8611 +
8612 +int
8613 +dm_query_session(
8614 + dm_sessid_t sid,
8615 + size_t buflen,
8616 + void __user *bufp,
8617 + size_t __user *rlenp)
8618 +{
8619 + dm_session_t *s; /* pointer to session given by sid */
8620 + int len; /* length of session info string */
8621 + int error;
8622 + char sessinfo[DM_SESSION_INFO_LEN];
8623 + unsigned long lc; /* lock cookie */
8624 +
8625 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
8626 + return(error);
8627 +
8628 + len = strlen(s->sn_info) + 1; /* NULL terminated when created */
8629 + memcpy(sessinfo, s->sn_info, len);
8630 +
8631 + mutex_spinunlock(&s->sn_qlock, lc);
8632 +
8633 + /* Now that the mutex is released, copy the sessinfo to the user. */
8634 +
8635 + if (put_user(len, rlenp)) {
8636 + error = -EFAULT;
8637 + } else if (len > buflen) {
8638 + error = -E2BIG;
8639 + } else if (copy_to_user(bufp, sessinfo, len)) {
8640 + error = -EFAULT;
8641 + } else {
8642 + error = 0;
8643 + }
8644 + return(error);
8645 +}
8646 +
8647 +
8648 +/*
8649 + * Return all of the previously delivered tokens (that is, their IDs)
8650 + * for the given session.
8651 + */
8652 +
8653 +int
8654 +dm_getall_tokens(
8655 + dm_sessid_t sid, /* session obtaining tokens from */
8656 + u_int nelem, /* size of tokenbufp */
8657 + dm_token_t __user *tokenbufp,/* buffer to copy token IDs to */
8658 + u_int __user *nelemp) /* return number copied to tokenbufp */
8659 +{
8660 + dm_session_t *s; /* pointer to session given by sid */
8661 + dm_tokevent_t *tevp; /* event message queue traversal */
8662 + unsigned long lc; /* lock cookie */
8663 + int tokcnt;
8664 + dm_token_t *toklist;
8665 + int error;
8666 + int i;
8667 +
8668 + /* Loop until we can get the right amount of temp space, being careful
8669 + not to hold a mutex during the allocation. Usually only one trip.
8670 + */
8671 +
8672 + for (;;) {
8673 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
8674 + return(error);
8675 + tokcnt = s->sn_delq.eq_count;
8676 + mutex_spinunlock(&s->sn_qlock, lc);
8677 +
8678 + if (tokcnt == 0) {
8679 + /*if (suword(nelemp, 0))*/
8680 + if (put_user(0, nelemp))
8681 + return(-EFAULT);
8682 + return(0);
8683 + }
8684 + toklist = kmalloc(tokcnt * sizeof(*tokenbufp), GFP_KERNEL);
8685 + if (toklist == NULL) {
8686 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
8687 + return -ENOMEM;
8688 + }
8689 +
8690 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) {
8691 + kfree(toklist);
8692 + return(error);
8693 + }
8694 +
8695 + if (tokcnt == s->sn_delq.eq_count)
8696 + break;
8697 +
8698 + mutex_spinunlock(&s->sn_qlock, lc);
8699 + kfree(toklist);
8700 + }
8701 +
8702 + /* Make a temp copy of the data, then release the mutex. */
8703 +
8704 + tevp = s->sn_delq.eq_head;
8705 + for (i = 0; i < tokcnt; i++, tevp = tevp->te_next)
8706 + toklist[i] = tevp->te_msg.ev_token;
8707 +
8708 + mutex_spinunlock(&s->sn_qlock, lc);
8709 +
8710 + /* Now copy the data to the user. */
8711 +
8712 + if (put_user(tokcnt, nelemp)) {
8713 + error = -EFAULT;
8714 + } else if (tokcnt > nelem) {
8715 + error = -E2BIG;
8716 + } else if (copy_to_user(tokenbufp,toklist,tokcnt*sizeof(*tokenbufp))) {
8717 + error = -EFAULT;
8718 + } else {
8719 + error = 0;
8720 + }
8721 + kfree(toklist);
8722 + return(error);
8723 +}
8724 +
8725 +
8726 +/*
8727 + * Return the message identified by token.
8728 + */
8729 +
8730 +int
8731 +dm_find_eventmsg(
8732 + dm_sessid_t sid,
8733 + dm_token_t token,
8734 + size_t buflen,
8735 + void __user *bufp,
8736 + size_t __user *rlenp)
8737 +{
8738 + dm_tokevent_t *tevp; /* message identified by token */
8739 + int msgsize; /* size of message to copy out */
8740 + void *msg;
8741 + int error;
8742 + unsigned long lc; /* lock cookie */
8743 +
8744 + /* Because some of the events (dm_data_event_t in particular) contain
8745 + __u64 fields, we need to make sure that the buffer provided by the
8746 + caller is aligned such that he can read those fields successfully.
8747 + */
8748 +
8749 + if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
8750 + return(-EFAULT);
8751 +
8752 + /* Allocate the right amount of temp space, being careful not to hold
8753 + a mutex during the allocation.
8754 + */
8755 +
8756 + if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
8757 + return(error);
8758 + msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
8759 + mutex_spinunlock(&tevp->te_lock, lc);
8760 +
8761 + msg = kmalloc(msgsize, GFP_KERNEL);
8762 + if (msg == NULL) {
8763 + printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
8764 + return -ENOMEM;
8765 + }
8766 +
8767 + if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) {
8768 + kfree(msg);
8769 + return(error);
8770 + }
8771 +
8772 + /* Make a temp copy of the data, then release the mutex. */
8773 +
8774 + memcpy(msg, &tevp->te_msg, msgsize);
8775 + mutex_spinunlock(&tevp->te_lock, lc);
8776 +
8777 + /* Now copy the data to the user. */
8778 +
8779 + if (put_user(msgsize,rlenp)) {
8780 + error = -EFAULT;
8781 + } else if (msgsize > buflen) { /* user buffer not big enough */
8782 + error = -E2BIG;
8783 + } else if (copy_to_user( bufp, msg, msgsize )) {
8784 + error = -EFAULT;
8785 + } else {
8786 + error = 0;
8787 + }
8788 + kfree(msg);
8789 + return(error);
8790 +}
8791 +
8792 +
8793 +int
8794 +dm_move_event(
8795 + dm_sessid_t srcsid,
8796 + dm_token_t token,
8797 + dm_sessid_t targetsid,
8798 + dm_token_t __user *rtokenp)
8799 +{
8800 + dm_session_t *s1;
8801 + dm_session_t *s2;
8802 + dm_tokevent_t *tevp;
8803 + int error;
8804 + unsigned long lc; /* lock cookie */
8805 + int hash_it = 0;
8806 +
8807 + lc = mutex_spinlock(&dm_session_lock);
8808 +
8809 + if ((error = dm_find_session(srcsid, &s1)) != 0 ||
8810 + (error = dm_find_session(targetsid, &s2)) != 0 ||
8811 + (error = dm_find_msg(s1, token, &tevp)) != 0) {
8812 + mutex_spinunlock(&dm_session_lock, lc);
8813 + return(error);
8814 + }
8815 + dm_unlink_event(tevp, &s1->sn_delq);
8816 + if (tevp->te_flags & DM_TEF_HASHED) {
8817 + unhash_event(s1, tevp);
8818 + hash_it = 1;
8819 + }
8820 + dm_link_event(tevp, &s2->sn_delq);
8821 + if (hash_it)
8822 + hash_event(s2, tevp);
8823 + mutex_spinunlock(&dm_session_lock, lc);
8824 +
8825 + if (copy_to_user(rtokenp, &token, sizeof(token)))
8826 + return(-EFAULT);
8827 + return(0);
8828 +}
8829 +
8830 +
8831 +/* ARGSUSED */
8832 +int
8833 +dm_pending(
8834 + dm_sessid_t sid,
8835 + dm_token_t token,
8836 + dm_timestruct_t __user *delay) /* unused */
8837 +{
8838 + dm_tokevent_t *tevp;
8839 + int error;
8840 + unsigned long lc; /* lock cookie */
8841 +
8842 + if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
8843 + return(error);
8844 +
8845 + tevp->te_flags |= DM_TEF_INTERMED;
8846 + if (tevp->te_evt_ref > 0) /* if event generation threads exist */
8847 + sv_broadcast(&tevp->te_evt_queue);
8848 +
8849 + mutex_spinunlock(&tevp->te_lock, lc);
8850 + return(0);
8851 +}
8852 +
8853 +
8854 +int
8855 +dm_get_events(
8856 + dm_sessid_t sid,
8857 + u_int maxmsgs,
8858 + u_int flags,
8859 + size_t buflen,
8860 + void __user *bufp,
8861 + size_t __user *rlenp)
8862 +{
8863 + dm_session_t *s; /* pointer to session given by sid */
8864 + dm_tokevent_t *tevp; /* next event message on queue */
8865 + int error;
8866 + unsigned long lc1; /* first lock cookie */
8867 + unsigned long lc2 = 0; /* second lock cookie */
8868 + int totalsize;
8869 + int msgsize;
8870 + dm_eventmsg_t __user *prevmsg;
8871 + int prev_msgsize = 0;
8872 + u_int msgcnt;
8873 +
8874 + /* Because some of the events (dm_data_event_t in particular) contain
8875 + __u64 fields, we need to make sure that the buffer provided by the
8876 + caller is aligned such that he can read those fields successfully.
8877 + */
8878 +
8879 + if (((unsigned long)bufp & (sizeof(__u64) - 1)) != 0)
8880 + return(-EFAULT);
8881 +
8882 + /* Find the indicated session and lock it. */
8883 +
8884 + if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
8885 + return(error);
8886 +
8887 + /* Check for messages on sn_newq. If there aren't any that haven't
8888 + already been grabbed by another process, and if we are supposed to
8889 + to wait until one shows up, then go to sleep interruptibly on the
8890 + sn_readerq semaphore. The session can't disappear out from under
8891 + us as long as sn_readerq is non-zero.
8892 + */
8893 +
8894 + for (;;) {
8895 + int rc;
8896 +
8897 + for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
8898 + lc2 = mutex_spinlock(&tevp->te_lock);
8899 + if (!(tevp->te_flags & DM_TEF_LOCKED))
8900 + break;
8901 + mutex_spinunlock(&tevp->te_lock, lc2);
8902 + }
8903 + if (tevp)
8904 + break; /* got one! */
8905 +
8906 + if (!(flags & DM_EV_WAIT)) {
8907 + mutex_spinunlock(&s->sn_qlock, lc1);
8908 + return(-EAGAIN);
8909 + }
8910 + s->sn_readercnt++;
8911 +
8912 + sv_wait_sig(&s->sn_readerq, 1, &s->sn_qlock, lc1);
8913 + rc = signal_pending(current);
8914 +
8915 + lc1 = mutex_spinlock(&s->sn_qlock);
8916 + s->sn_readercnt--;
8917 + if (rc) { /* if signal was received */
8918 + mutex_spinunlock(&s->sn_qlock, lc1);
8919 + return(-EINTR);
8920 + }
8921 + }
8922 +
8923 + /* At least one message is available for delivery, and we have both the
8924 + session lock and event lock. Mark the event so that it is not
8925 + grabbed by other daemons, then drop both locks prior copying the
8926 + data to the caller's buffer. Leaving the event on the queue in a
8927 + marked state prevents both the session and the event from
8928 + disappearing out from under us while we don't have the locks.
8929 + */
8930 +
8931 + tevp->te_flags |= DM_TEF_LOCKED;
8932 + mutex_spinunlock(&tevp->te_lock, lc2); /* reverse cookie order */
8933 + mutex_spinunlock(&s->sn_qlock, lc1);
8934 +
8935 + /* Continue to deliver messages until there are no more, the
8936 + user's buffer becomes full, or we hit his maxmsgs limit.
8937 + */
8938 +
8939 + totalsize = 0; /* total bytes transferred to the user */
8940 + prevmsg = NULL;
8941 + msgcnt = 0;
8942 +
8943 + while (tevp) {
8944 + /* Compute the number of bytes to be moved, rounding up to an
8945 + 8-byte boundary so that any subsequent messages will also be
8946 + aligned.
8947 + */
8948 +
8949 + msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
8950 + msgsize = (msgsize + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1);
8951 + totalsize += msgsize;
8952 +
8953 + /* If it fits, copy the message into the user's buffer and
8954 + update his 'rlenp'. Update the _link pointer for any
8955 + previous message.
8956 + */
8957 +
8958 + if (totalsize > buflen) { /* no more room */
8959 + error = -E2BIG;
8960 + } else if (put_user(totalsize, rlenp)) {
8961 + error = -EFAULT;
8962 + } else if (copy_to_user(bufp, &tevp->te_msg, msgsize)) {
8963 + error = -EFAULT;
8964 + } else if (prevmsg && put_user(prev_msgsize, &prevmsg->_link)) {
8965 + error = -EFAULT;
8966 + } else {
8967 + error = 0;
8968 + }
8969 +
8970 + /* If an error occurred, just unmark the event and leave it on
8971 + the queue for someone else. Note that other daemons may
8972 + have gone to sleep because this event was marked, so wake
8973 + them up. Also, if at least one message has already been
8974 + delivered, then an error here is not really an error.
8975 + */
8976 +
8977 + lc1 = mutex_spinlock(&s->sn_qlock);
8978 + lc2 = mutex_spinlock(&tevp->te_lock);
8979 + tevp->te_flags &= ~DM_TEF_LOCKED; /* drop the mark */
8980 +
8981 + if (error) {
8982 + if (s->sn_readercnt)
8983 + sv_signal(&s->sn_readerq);
8984 +
8985 + mutex_spinunlock(&tevp->te_lock, lc2); /* rev. order */
8986 + mutex_spinunlock(&s->sn_qlock, lc1);
8987 + if (prevmsg)
8988 + return(0);
8989 + if (error == -E2BIG && put_user(totalsize,rlenp))
8990 + error = -EFAULT;
8991 + return(error);
8992 + }
8993 +
8994 + /* The message was successfully delivered. Unqueue it. */
8995 +
8996 + dm_unlink_event(tevp, &s->sn_newq);
8997 +
8998 + /* Wake up the first of any processes waiting for room on the
8999 + sn_newq.
9000 + */
9001 +
9002 + if (s->sn_writercnt)
9003 + sv_signal(&s->sn_writerq);
9004 +
9005 + /* If the message is synchronous, add it to the sn_delq while
9006 + still holding the lock. If it is asynchronous, free it.
9007 + */
9008 +
9009 + if (tevp->te_msg.ev_token != DM_INVALID_TOKEN) { /* synch */
9010 + dm_link_event(tevp, &s->sn_delq);
9011 + mutex_spinunlock(&tevp->te_lock, lc2);
9012 + } else {
9013 + tevp->te_flags |= DM_TEF_FINAL;
9014 + if (tevp->te_flags & DM_TEF_HASHED)
9015 + unhash_event(s, tevp);
9016 + mutex_spinunlock(&tevp->te_lock, lc2);
9017 + dm_put_tevp(tevp, NULL);/* can't cause destroy events */
9018 + }
9019 +
9020 + /* Update our notion of where we are in the user's buffer. If
9021 + he doesn't want any more messages, then stop.
9022 + */
9023 +
9024 + prevmsg = (dm_eventmsg_t __user *)bufp;
9025 + prev_msgsize = msgsize;
9026 + bufp = (char __user *)bufp + msgsize;
9027 +
9028 + msgcnt++;
9029 + if (maxmsgs && msgcnt >= maxmsgs) {
9030 + mutex_spinunlock(&s->sn_qlock, lc1);
9031 + break;
9032 + }
9033 +
9034 + /* While still holding the sn_qlock, see if any additional
9035 + messages are available for delivery.
9036 + */
9037 +
9038 + for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
9039 + lc2 = mutex_spinlock(&tevp->te_lock);
9040 + if (!(tevp->te_flags & DM_TEF_LOCKED)) {
9041 + tevp->te_flags |= DM_TEF_LOCKED;
9042 + mutex_spinunlock(&tevp->te_lock, lc2);
9043 + break;
9044 + }
9045 + mutex_spinunlock(&tevp->te_lock, lc2);
9046 + }
9047 + mutex_spinunlock(&s->sn_qlock, lc1);
9048 + }
9049 + return(0);
9050 +}
9051 +
9052 +
9053 +/*
9054 + * Remove an event message from the delivered queue, set the returned
9055 + * error where the event generator wants it, and wake up the generator.
9056 + * Also currently have the user side release any locks it holds...
9057 + */
9058 +
9059 +/* ARGSUSED */
9060 +int
9061 +dm_respond_event(
9062 + dm_sessid_t sid,
9063 + dm_token_t token,
9064 + dm_response_t response,
9065 + int reterror,
9066 + size_t buflen, /* unused */
9067 + void __user *respbufp) /* unused */
9068 +{
9069 + dm_session_t *s; /* pointer to session given by sid */
9070 + dm_tokevent_t *tevp; /* event message queue traversal */
9071 + int error;
9072 + unsigned long lc; /* lock cookie */
9073 +
9074 + /* Sanity check the input parameters. */
9075 +
9076 + switch (response) {
9077 + case DM_RESP_CONTINUE: /* continue must have reterror == 0 */
9078 + if (reterror != 0)
9079 + return(-EINVAL);
9080 + break;
9081 + case DM_RESP_ABORT: /* abort must have errno set */
9082 + if (reterror <= 0)
9083 + return(-EINVAL);
9084 + break;
9085 + case DM_RESP_DONTCARE:
9086 + reterror = -1; /* to distinguish DM_RESP_DONTCARE */
9087 + break;
9088 + default:
9089 + return(-EINVAL);
9090 + }
9091 +
9092 + /* Hold session lock until the event is unqueued. */
9093 +
9094 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
9095 + return(error);
9096 +
9097 + if ((error = dm_find_msg(s, token, &tevp)) != 0) {
9098 + mutex_spinunlock(&s->sn_qlock, lc);
9099 + return(error);
9100 + }
9101 + nested_spinlock(&tevp->te_lock);
9102 +
9103 + if ((response == DM_RESP_DONTCARE) &&
9104 + (tevp->te_msg.ev_type != DM_EVENT_MOUNT)) {
9105 + error = -EINVAL;
9106 + nested_spinunlock(&tevp->te_lock);
9107 + mutex_spinunlock(&s->sn_qlock, lc);
9108 + } else {
9109 + dm_unlink_event(tevp, &s->sn_delq);
9110 + if (tevp->te_flags & DM_TEF_HASHED)
9111 + unhash_event(s, tevp);
9112 + tevp->te_reply = -reterror; /* linux wants negative errno */
9113 + tevp->te_flags |= DM_TEF_FINAL;
9114 + if (tevp->te_evt_ref)
9115 + sv_broadcast(&tevp->te_evt_queue);
9116 + nested_spinunlock(&tevp->te_lock);
9117 + mutex_spinunlock(&s->sn_qlock, lc);
9118 + error = 0;
9119 +
9120 + /* Absolutely no locks can be held when calling dm_put_tevp! */
9121 +
9122 + dm_put_tevp(tevp, NULL); /* this can generate destroy events */
9123 + }
9124 + return(error);
9125 +}
9126 +
9127 +/* The caller must hold sn_qlock.
9128 + This will return the tokevent locked.
9129 + */
9130 +static dm_tokevent_t *
9131 +__find_match_event_no_waiters_locked(
9132 + dm_tokevent_t *tevp1,
9133 + dm_eventq_t *queue)
9134 +{
9135 + dm_tokevent_t *tevp2, *next_tevp;
9136 + dm_tokdata_t *tdp1 = tevp1->te_tdp;
9137 + dm_tokdata_t *tdp2;
9138 + dm_data_event_t *d_event1;
9139 + dm_data_event_t *d_event2;
9140 +
9141 + d_event1 = (dm_data_event_t *)((char *)&tevp1->te_msg + tevp1->te_msg.ev_data.vd_offset);
9142 +
9143 + for(tevp2 = queue->eq_head; tevp2; tevp2 = next_tevp) {
9144 + nested_spinlock(&tevp2->te_lock);
9145 + next_tevp = tevp2->te_next;
9146 +
9147 + /* Just compare the first tdp's in each--there should
9148 + be just one, if it's the match we want.
9149 + */
9150 + tdp2 = tevp2->te_tdp;
9151 + if ((tevp2->te_msg.ev_type == tevp1->te_msg.ev_type) &&
9152 + (tevp2->te_tdp->td_type == tevp1->te_tdp->td_type) &&
9153 + (tevp2->te_evt_ref == 0) && (tdp2->td_next == NULL) &&
9154 + (memcmp(&tdp1->td_handle, &tdp2->td_handle,
9155 + sizeof(dm_handle_t)) == 0)) {
9156 +
9157 + d_event2 = (dm_data_event_t *)((char *)&tevp2->te_msg + tevp2->te_msg.ev_data.vd_offset);
9158 +
9159 +
9160 + if ((d_event2->de_offset == d_event1->de_offset) &&
9161 + (d_event2->de_length == d_event1->de_length)) {
9162 + /* Match -- return it locked */
9163 + return tevp2;
9164 + }
9165 + }
9166 + nested_spinunlock(&tevp2->te_lock);
9167 + }
9168 + return NULL;
9169 +}
9170 +
9171 +/* The caller must hold the sn_qlock.
9172 + The returned tokevent will be locked with nested_spinlock.
9173 + */
9174 +static dm_tokevent_t *
9175 +find_match_event_no_waiters_locked(
9176 + dm_session_t *s,
9177 + dm_tokevent_t *tevp)
9178 +{
9179 + dm_tokevent_t *tevp2;
9180 +
9181 + if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail))
9182 + return NULL;
9183 + if (!tevp->te_tdp)
9184 + return NULL;
9185 + if (tevp->te_tdp->td_next) {
9186 + /* If it has multiple tdp's then don't bother trying to
9187 + find a match.
9188 + */
9189 + return NULL;
9190 + }
9191 + tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_newq);
9192 + if (tevp2 == NULL)
9193 + tevp2 = __find_match_event_no_waiters_locked(tevp, &s->sn_delq);
9194 + /* returns a locked tokevent */
9195 + return tevp2;
9196 +}
9197 +
9198 +
9199 +
9200 +/* Queue the filled in event message pointed to by tevp on the session s, and
9201 + (if a synchronous event) wait for the reply from the DMAPI application.
9202 + The caller MUST be holding the session lock before calling this routine!
9203 + The session lock is always released upon exit.
9204 + Returns:
9205 + -1 == don't care
9206 + 0 == success (or async event)
9207 + > 0 == errno describing reason for failure
9208 +*/
9209 +
9210 +static int
9211 +dm_enqueue(
9212 + dm_session_t *s,
9213 + unsigned long lc, /* input lock cookie */
9214 + dm_tokevent_t **tevpp, /* in/out parameter */
9215 + int sync,
9216 + int flags,
9217 + int interruptable)
9218 +{
9219 + int is_unmount = 0;
9220 + int is_hashable = 0;
9221 + int reply;
9222 + dm_tokevent_t *tevp = *tevpp;
9223 +
9224 + /* If the caller isn't planning to stick around for the result
9225 + and this request is identical to one that is already on the
9226 + queues then just give the caller an EAGAIN. Release the
9227 + session lock before returning.
9228 +
9229 + We look only at NDELAY requests with an event type of READ,
9230 + WRITE, or TRUNCATE on objects that are regular files.
9231 + */
9232 +
9233 + if ((flags & DM_FLAGS_NDELAY) && DM_EVENT_RDWRTRUNC(tevp) &&
9234 + (tevp->te_tdp->td_type == DM_TDT_REG)) {
9235 + if (repeated_event(s, tevp)) {
9236 + mutex_spinunlock(&s->sn_qlock, lc);
9237 + return -EAGAIN;
9238 + }
9239 + is_hashable = 1;
9240 + }
9241 +
9242 + /* If the caller is a sync event then look for a matching sync
9243 + event. If there is a match and it doesn't currently have
9244 + event threads waiting on it, then we will drop our own
9245 + tokevent and jump on the matching event.
9246 + */
9247 + if (((flags & DM_FLAGS_NDELAY) == 0) && DM_EVENT_RDWRTRUNC(tevp) &&
9248 + (tevp->te_tdp->td_type == DM_TDT_REG)) {
9249 + dm_tokevent_t *tevp2;
9250 + if ((tevp2 = find_match_event_no_waiters_locked(s, tevp))) {
9251 + ASSERT(tevp2->te_evt_ref == 0);
9252 + tevp2->te_evt_ref++;
9253 + nested_spinunlock(&tevp2->te_lock);
9254 + nested_spinlock(&tevp->te_lock);
9255 + tevp->te_evt_ref--;
9256 + nested_spinunlock(&tevp->te_lock);
9257 + mutex_spinunlock(&s->sn_qlock, lc);
9258 + /* All locks have been released */
9259 + dm_evt_rele_tevp(tevp, 1);
9260 + *tevpp = tevp = tevp2;
9261 + goto wait_on_tevp;
9262 + }
9263 + }
9264 +
9265 + if (tevp->te_msg.ev_type == DM_EVENT_UNMOUNT)
9266 + is_unmount = 1;
9267 +
9268 + /* Check for room on sn_newq. If there is no room for new messages,
9269 + then go to sleep on the sn_writerq semaphore. The
9270 + session cannot disappear out from under us as long as sn_writercnt
9271 + is non-zero.
9272 + */
9273 +
9274 + while (s->sn_newq.eq_count >= dm_max_queued_msgs) { /* no room */
9275 + s->sn_writercnt++;
9276 + dm_link_event(tevp, &s->sn_evt_writerq);
9277 + if (interruptable) {
9278 + sv_wait_sig(&s->sn_writerq, 1, &s->sn_qlock, lc);
9279 + if (signal_pending(current)) {
9280 + s->sn_writercnt--;
9281 + return -EINTR;
9282 + }
9283 + } else {
9284 + sv_wait(&s->sn_writerq, 1, &s->sn_qlock, lc);
9285 + }
9286 + lc = mutex_spinlock(&s->sn_qlock);
9287 + s->sn_writercnt--;
9288 + dm_unlink_event(tevp, &s->sn_evt_writerq);
9289 +#ifdef HAVE_DM_QUEUE_FLUSH
9290 + /* We hold the sn_qlock, from here to after we get into
9291 + * the sn_newq. Any thread going through
9292 + * dm_release_threads() looking for us is already past us
9293 + * and has set the DM_TEF_FLUSH flag for us or is blocked on
9294 + * sn_qlock and will find us in sn_newq after we release
9295 + * the sn_qlock.
9296 + * We check for dop->flushing anyway, in case the
9297 + * dm_release_threads() already completed before we
9298 + * could enter dmapi.
9299 + */
9300 + if (!sync) {
9301 + /* async events are forced into the newq */
9302 + break;
9303 + }
9304 + if (tevp->te_flags & DM_TEF_FLUSH) {
9305 + mutex_spinunlock(&s->sn_qlock, lc);
9306 + return tevp->te_reply;
9307 + }
9308 + else {
9309 + struct filesystem_dmapi_operations *dops;
9310 + dm_tokdata_t *tdp;
9311 + int errno = 0;
9312 +
9313 + nested_spinlock(&tevp->te_lock);
9314 + for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
9315 + if (tdp->td_ip) {
9316 + dops = dm_fsys_ops(tdp->td_ip->i_sb);
9317 + ASSERT(dops);
9318 + if (dops->flushing)
9319 + errno = dops->flushing(tdp->td_ip);
9320 + if (errno) {
9321 + nested_spinunlock(&tevp->te_lock);
9322 + mutex_spinunlock(&s->sn_qlock, lc);
9323 + return errno;
9324 + }
9325 + }
9326 + }
9327 + nested_spinunlock(&tevp->te_lock);
9328 + }
9329 +#endif /* HAVE_DM_QUEUE_FLUSH */
9330 + }
9331 +
9332 + /* Assign a sequence number and token to the event and bump the
9333 + application reference count by one. We don't need 'te_lock' here
9334 + because this thread is still the only thread that can see the event.
9335 + */
9336 +
9337 + nested_spinlock(&dm_token_lock);
9338 + tevp->te_msg.ev_sequence = dm_next_sequence++;
9339 + if (sync) {
9340 + tevp->te_msg.ev_token = dm_next_token++;
9341 + } else {
9342 + tevp->te_msg.ev_token = DM_INVALID_TOKEN;
9343 + }
9344 + nested_spinunlock(&dm_token_lock);
9345 +
9346 + tevp->te_app_ref++;
9347 +
9348 + /* Room exists on the sn_newq queue, so add this request. If the
9349 + queue was previously empty, wake up the first of any processes
9350 + that are waiting for an event.
9351 + */
9352 +
9353 + dm_link_event(tevp, &s->sn_newq);
9354 + if (is_hashable)
9355 + hash_event(s, tevp);
9356 +
9357 + if (s->sn_readercnt)
9358 + sv_signal(&s->sn_readerq);
9359 +
9360 + mutex_spinunlock(&s->sn_qlock, lc);
9361 +
9362 + /* Now that the message is queued, processes issuing asynchronous
9363 + events or DM_EVENT_UNMOUNT events are ready to continue.
9364 + */
9365 +
9366 + if (!sync || is_unmount)
9367 + return 0;
9368 +
9369 + /* Synchronous requests wait until a final reply is received. If the
9370 + caller supplied the DM_FLAGS_NDELAY flag, the process will return
9371 + EAGAIN if dm_pending() sets DM_TEF_INTERMED. We also let users
9372 + Cntl-C out of a read, write, and truncate requests.
9373 + */
9374 +
9375 +wait_on_tevp:
9376 + lc = mutex_spinlock(&tevp->te_lock);
9377 +
9378 + while (!(tevp->te_flags & DM_TEF_FINAL)) {
9379 + if ((tevp->te_flags & DM_TEF_INTERMED) &&
9380 + (flags & DM_FLAGS_NDELAY)) {
9381 + mutex_spinunlock(&tevp->te_lock, lc);
9382 + return -EAGAIN;
9383 + }
9384 + if (tevp->te_msg.ev_type == DM_EVENT_READ ||
9385 + tevp->te_msg.ev_type == DM_EVENT_WRITE ||
9386 + tevp->te_msg.ev_type == DM_EVENT_TRUNCATE) {
9387 + sv_wait_sig(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
9388 + if (signal_pending(current)){
9389 + return -EINTR;
9390 + }
9391 + } else {
9392 + sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
9393 + }
9394 + lc = mutex_spinlock(&tevp->te_lock);
9395 +#ifdef HAVE_DM_QUEUE_FLUSH
9396 + /* Did we pop out because of queue flushing? */
9397 + if (tevp->te_flags & DM_TEF_FLUSH) {
9398 + mutex_spinunlock(&tevp->te_lock, lc);
9399 + return tevp->te_reply;
9400 + }
9401 +#endif /* HAVE_DM_QUEUE_FLUSH */
9402 + }
9403 +
9404 + /* Return both the tevp and the reply which was stored in the tevp by
9405 + dm_respond_event. The tevp structure has already been removed from
9406 + the reply queue by this point in dm_respond_event().
9407 + */
9408 +
9409 + reply = tevp->te_reply;
9410 + mutex_spinunlock(&tevp->te_lock, lc);
9411 + return reply;
9412 +}
9413 +
9414 +
9415 +/* The filesystem is guaranteed to stay mounted while this event is
9416 + outstanding.
9417 +*/
9418 +
9419 +int
9420 +dm_enqueue_normal_event(
9421 + struct super_block *sb,
9422 + dm_tokevent_t **tevpp,
9423 + int flags)
9424 +{
9425 + dm_session_t *s;
9426 + int error;
9427 + int sync;
9428 + unsigned long lc; /* lock cookie */
9429 +
9430 + switch ((*tevpp)->te_msg.ev_type) {
9431 + case DM_EVENT_READ:
9432 + case DM_EVENT_WRITE:
9433 + case DM_EVENT_TRUNCATE:
9434 + case DM_EVENT_PREUNMOUNT:
9435 + case DM_EVENT_UNMOUNT:
9436 + case DM_EVENT_NOSPACE:
9437 + case DM_EVENT_CREATE:
9438 + case DM_EVENT_REMOVE:
9439 + case DM_EVENT_RENAME:
9440 + case DM_EVENT_SYMLINK:
9441 + case DM_EVENT_LINK:
9442 + case DM_EVENT_DEBUT: /* not currently supported */
9443 + sync = 1;
9444 + break;
9445 +
9446 + case DM_EVENT_DESTROY:
9447 + case DM_EVENT_POSTCREATE:
9448 + case DM_EVENT_POSTREMOVE:
9449 + case DM_EVENT_POSTRENAME:
9450 + case DM_EVENT_POSTSYMLINK:
9451 + case DM_EVENT_POSTLINK:
9452 + case DM_EVENT_ATTRIBUTE:
9453 + case DM_EVENT_CLOSE: /* not currently supported */
9454 + case DM_EVENT_CANCEL: /* not currently supported */
9455 + sync = 0;
9456 + break;
9457 +
9458 + default:
9459 + return(-EIO); /* garbage event number */
9460 + }
9461 +
9462 + /* Wait until a session selects disposition for the event. The session
9463 + is locked upon return from dm_waitfor_disp_session().
9464 + */
9465 +
9466 + if ((error = dm_waitfor_disp_session(sb, *tevpp, &s, &lc)) != 0)
9467 + return(error);
9468 +
9469 + return(dm_enqueue(s, lc, tevpp, sync, flags, 0));
9470 +}
9471 +
9472 +
9473 +/* Traverse the session list checking for sessions with the WANTMOUNT flag
9474 + set. When one is found, send it the message. Possible responses to the
9475 + message are one of DONTCARE, CONTINUE, or ABORT. The action taken in each
9476 + case is:
9477 + DONTCARE (-1) - Send the event to the next session with WANTMOUNT set
9478 + CONTINUE ( 0) - Proceed with the mount, errno zero.
9479 + ABORT (>0) - Fail the mount, return the returned errno.
9480 +
9481 + The mount request is sent to sessions in ascending session ID order.
9482 + Since the session list can change dramatically while this process is
9483 + sleeping in dm_enqueue(), this routine must use session IDs rather than
9484 + session pointers when keeping track of where it is in the list. Since
9485 + new sessions are always added at the end of the queue, and have increasing
9486 + session ID values, we don't have to worry about missing any session.
9487 +*/
9488 +
9489 +int
9490 +dm_enqueue_mount_event(
9491 + struct super_block *sb,
9492 + dm_tokevent_t *tevp)
9493 +{
9494 + dm_session_t *s;
9495 + dm_sessid_t sid;
9496 + int error;
9497 + unsigned long lc; /* lock cookie */
9498 +
9499 + /* Make the mounting filesystem visible to other DMAPI calls. */
9500 +
9501 + if ((error = dm_add_fsys_entry(sb, tevp)) != 0){
9502 + return(error);
9503 + }
9504 +
9505 + /* Walk through the session list presenting the mount event to each
9506 + session that is interested until a session accepts or rejects it,
9507 + or until all sessions ignore it.
9508 + */
9509 +
9510 + for (sid = DM_NO_SESSION, error = 1; error > 0; sid = s->sn_sessid) {
9511 +
9512 + lc = mutex_spinlock(&dm_session_lock);
9513 + for (s = dm_sessions; s; s = s->sn_next) {
9514 + if (s->sn_sessid > sid && s->sn_flags & DM_SN_WANTMOUNT) {
9515 + nested_spinlock(&s->sn_qlock);
9516 + nested_spinunlock(&dm_session_lock);
9517 + break;
9518 + }
9519 + }
9520 + if (s == NULL) {
9521 + mutex_spinunlock(&dm_session_lock, lc);
9522 + break; /* noone wants it; proceed with mount */
9523 + }
9524 + error = dm_enqueue(s, lc, &tevp, 1, 0, 0);
9525 + }
9526 +
9527 + /* If the mount will be allowed to complete, then update the fsrp entry
9528 + accordingly. If the mount is to be aborted, remove the fsrp entry.
9529 + */
9530 +
9531 + if (error >= 0) {
9532 + dm_change_fsys_entry(sb, DM_STATE_MOUNTED);
9533 + error = 0;
9534 + } else {
9535 + dm_remove_fsys_entry(sb);
9536 + }
9537 + return(error);
9538 +}
9539 +
9540 +int
9541 +dm_enqueue_sendmsg_event(
9542 + dm_sessid_t targetsid,
9543 + dm_tokevent_t *tevp,
9544 + int sync)
9545 +{
9546 + dm_session_t *s;
9547 + int error;
9548 + unsigned long lc; /* lock cookie */
9549 +
9550 + if ((error = dm_find_session_and_lock(targetsid, &s, &lc)) != 0)
9551 + return(error);
9552 +
9553 + return(dm_enqueue(s, lc, &tevp, sync, 0, 1));
9554 +}
9555 +
9556 +
9557 +dm_token_t
9558 +dm_enqueue_user_event(
9559 + dm_sessid_t sid,
9560 + dm_tokevent_t *tevp,
9561 + dm_token_t *tokenp)
9562 +{
9563 + dm_session_t *s;
9564 + int error;
9565 + unsigned long lc; /* lock cookie */
9566 +
9567 + /* Atomically find and lock the session whose session id is 'sid'. */
9568 +
9569 + if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
9570 + return(error);
9571 +
9572 + /* Assign a sequence number and token to the event, bump the
9573 + application reference count by one, and decrement the event
9574 + count because the caller gives up all ownership of the event.
9575 + We don't need 'te_lock' here because this thread is still the
9576 + only thread that can see the event.
9577 + */
9578 +
9579 + nested_spinlock(&dm_token_lock);
9580 + tevp->te_msg.ev_sequence = dm_next_sequence++;
9581 + *tokenp = tevp->te_msg.ev_token = dm_next_token++;
9582 + nested_spinunlock(&dm_token_lock);
9583 +
9584 + tevp->te_flags &= ~(DM_TEF_INTERMED|DM_TEF_FINAL);
9585 + tevp->te_app_ref++;
9586 + tevp->te_evt_ref--;
9587 +
9588 + /* Add the request to the tail of the sn_delq. Now it's visible. */
9589 +
9590 + dm_link_event(tevp, &s->sn_delq);
9591 + mutex_spinunlock(&s->sn_qlock, lc);
9592 +
9593 + return(0);
9594 +}
9595 +
9596 +#ifdef HAVE_DM_QUEUE_FLUSH
9597 +/* If inode is non-null, find any tdp referencing that inode and flush the
9598 + * thread waiting on that inode and set DM_TEF_FLUSH for that tokevent.
9599 + * Otherwise, if inode is null, find any tdp referencing the specified fsid
9600 + * and flush that thread and set DM_TEF_FLUSH for that tokevent.
9601 + */
9602 +static int
9603 +dm_flush_events(
9604 + dm_session_t *s,
9605 + dm_fsid_t *fsidp,
9606 + struct inode *inode, /* may be null */
9607 + dm_eventq_t *queue,
9608 + int is_writerq,
9609 + int errno)
9610 +{
9611 + dm_tokevent_t *tevp, *next_tevp;
9612 + dm_tokdata_t *tdp;
9613 + int found_events = 0;
9614 +
9615 + ASSERT(fsidp);
9616 + for (tevp = queue->eq_head; tevp; tevp = next_tevp) {
9617 + nested_spinlock(&tevp->te_lock);
9618 + next_tevp = tevp->te_next;
9619 +
9620 + for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
9621 + if( inode ) {
9622 + if( tdp->td_ip == inode ) {
9623 + break;
9624 + }
9625 + }
9626 + else if(memcmp(fsidp, &tdp->td_handle.ha_fsid, sizeof(*fsidp)) == 0) {
9627 + break;
9628 + }
9629 + }
9630 +
9631 + if (tdp != NULL) {
9632 + /* found a handle reference in this event */
9633 + ++found_events;
9634 + tevp->te_flags |= DM_TEF_FLUSH;
9635 +
9636 + /* Set the reply value, unless dm_get_events is
9637 + already on this one.
9638 + */
9639 + if (! (tevp->te_flags & DM_TEF_LOCKED))
9640 + tevp->te_reply = errno;
9641 +
9642 + /* If it is on the sn_evt_writerq or is being
9643 + used by dm_get_events then we're done with it.
9644 + */
9645 + if (is_writerq || (tevp->te_flags & DM_TEF_LOCKED)) {
9646 + nested_spinunlock(&tevp->te_lock);
9647 + continue;
9648 + }
9649 +
9650 + /* If there is a thread waiting on a synchronous
9651 + event then be like dm_respond_event.
9652 + */
9653 +
9654 + if ((tevp->te_evt_ref) &&
9655 + (tevp->te_msg.ev_token != DM_INVALID_TOKEN)) {
9656 +
9657 + tevp->te_flags |= DM_TEF_FINAL;
9658 + dm_unlink_event(tevp, queue);
9659 + if (tevp->te_flags & DM_TEF_HASHED)
9660 + unhash_event(s, tevp);
9661 + sv_broadcast(&tevp->te_evt_queue);
9662 + nested_spinunlock(&tevp->te_lock);
9663 + dm_put_tevp(tevp, NULL);
9664 + continue;
9665 + }
9666 + }
9667 + nested_spinunlock(&tevp->te_lock);
9668 + }
9669 +
9670 + return(found_events);
9671 +}
9672 +
9673 +
9674 +/* If inode is non-null then find any threads that have a reference to that
9675 + * inode and flush them with the specified errno.
9676 + * Otherwise,if inode is null, then find any threads that have a reference
9677 + * to that sb and flush them with the specified errno.
9678 + * We look for these threads in each session's sn_evt_writerq, sn_newq,
9679 + * and sn_delq.
9680 + */
9681 +int
9682 +dm_release_threads(
9683 + struct super_block *sb,
9684 + struct inode *inode, /* may be null */
9685 + int errno)
9686 +{
9687 + dm_sessid_t sid;
9688 + dm_session_t *s;
9689 + unsigned long lc;
9690 + u_int sesscnt;
9691 + dm_sessid_t *sidlist;
9692 + int i;
9693 + int found_events = 0;
9694 + dm_fsid_t fsid;
9695 + struct filesystem_dmapi_operations *dops;
9696 +
9697 + ASSERT(sb);
9698 + dops = dm_fsys_ops(sb);
9699 + ASSERT(dops);
9700 + dops->get_fsid(sb, &fsid);
9701 + dm_release_disp_threads(&fsid, inode, errno);
9702 +
9703 + /* Loop until we can get the right amount of temp space, being careful
9704 + not to hold a mutex during the allocation. Usually only one trip.
9705 + */
9706 +
9707 + for (;;) {
9708 + lc = mutex_spinlock(&dm_session_lock);
9709 + sesscnt = dm_sessions_active;
9710 + mutex_spinunlock(&dm_session_lock, lc);
9711 +
9712 + if (sesscnt == 0)
9713 + return 0;
9714 +
9715 + sidlist = kmalloc(sesscnt * sizeof(sid), GFP_KERNEL);
9716 +
9717 + lc = mutex_spinlock(&dm_session_lock);
9718 + if (sesscnt == dm_sessions_active)
9719 + break;
9720 +
9721 + mutex_spinunlock(&dm_session_lock, lc);
9722 + kfree(sidlist);
9723 + }
9724 +
9725 + for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next)
9726 + sidlist[i] = s->sn_sessid;
9727 +
9728 + mutex_spinunlock(&dm_session_lock, lc);
9729 +
9730 +
9731 + for (i = 0; i < sesscnt; i++) {
9732 + sid = sidlist[i];
9733 + if( dm_find_session_and_lock( sid, &s, &lc ) == 0 ){
9734 + found_events = dm_flush_events( s, &fsid, inode,
9735 + &s->sn_evt_writerq, 1,
9736 + errno );
9737 + if (found_events)
9738 + sv_broadcast(&s->sn_writerq);
9739 +
9740 + dm_flush_events(s, &fsid, inode, &s->sn_newq, 0, errno);
9741 + dm_flush_events(s, &fsid, inode, &s->sn_delq, 0, errno);
9742 +
9743 + mutex_spinunlock( &s->sn_qlock, lc );
9744 + }
9745 + }
9746 + kfree(sidlist);
9747 +
9748 + return 0;
9749 +}
9750 +#endif /* HAVE_DM_QUEUE_FLUSH */
9751 Index: linux-2.6.26/fs/dmapi/dmapi_sysent.c
9752 ===================================================================
9753 --- /dev/null
9754 +++ linux-2.6.26/fs/dmapi/dmapi_sysent.c
9755 @@ -0,0 +1,805 @@
9756 +/*
9757 + * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
9758 + *
9759 + * This program is free software; you can redistribute it and/or modify it
9760 + * under the terms of version 2 of the GNU General Public License as
9761 + * published by the Free Software Foundation.
9762 + *
9763 + * This program is distributed in the hope that it would be useful, but
9764 + * WITHOUT ANY WARRANTY; without even the implied warranty of
9765 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9766 + *
9767 + * Further, this software is distributed without any warranty that it is
9768 + * free of the rightful claim of any third person regarding infringement
9769 + * or the like. Any license provided herein, whether implied or
9770 + * otherwise, applies only to this software file. Patent licenses, if
9771 + * any, provided herein do not apply to combinations of this program with
9772 + * other software, or any other product whatsoever.
9773 + *
9774 + * You should have received a copy of the GNU General Public License along
9775 + * with this program; if not, write the Free Software Foundation, Inc., 59
9776 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
9777 + *
9778 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
9779 + * Mountain View, CA 94043, or:
9780 + *
9781 + * http://www.sgi.com
9782 + *
9783 + * For further information regarding this notice, see:
9784 + *
9785 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
9786 + */
9787 +
9788 +/* Data Migration API (DMAPI)
9789 + */
9790 +
9791 +
9792 +/* We're using MISC_MAJOR / MISC_DYNAMIC_MINOR. */
9793 +
9794 +#include <linux/version.h>
9795 +#include <linux/kernel.h>
9796 +#include <linux/miscdevice.h>
9797 +#include <linux/major.h>
9798 +#include <linux/init.h>
9799 +#include <linux/proc_fs.h>
9800 +#include <linux/module.h>
9801 +#include <linux/smp_lock.h>
9802 +
9803 +#include <asm/uaccess.h>
9804 +
9805 +#include "dmapi.h"
9806 +#include "dmapi_kern.h"
9807 +#include "dmapi_private.h"
9808 +
9809 +struct kmem_cache *dm_fsreg_cachep = NULL;
9810 +struct kmem_cache *dm_tokdata_cachep = NULL;
9811 +struct kmem_cache *dm_session_cachep = NULL;
9812 +struct kmem_cache *dm_fsys_map_cachep = NULL;
9813 +struct kmem_cache *dm_fsys_vptr_cachep = NULL;
9814 +
9815 +static int
9816 +dmapi_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
9817 + unsigned long arg)
9818 +{
9819 + sys_dmapi_args_t kargs;
9820 + sys_dmapi_args_t *uap = &kargs;
9821 + int error = 0;
9822 + int rvp = -ENOSYS;
9823 + int use_rvp = 0;
9824 +
9825 + if (!capable(CAP_MKNOD))
9826 + return -EPERM;
9827 +
9828 + if( copy_from_user( &kargs, (sys_dmapi_args_t __user *)arg,
9829 + sizeof(sys_dmapi_args_t) ) )
9830 + return -EFAULT;
9831 +
9832 + unlock_kernel();
9833 +
9834 + switch (cmd) {
9835 + case DM_CLEAR_INHERIT:
9836 + error = dm_clear_inherit(
9837 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9838 + (void __user *) DM_Parg(uap,2), /* hanp */
9839 + (size_t) DM_Uarg(uap,3), /* hlen */
9840 + (dm_token_t) DM_Uarg(uap,4), /* token */
9841 + (dm_attrname_t __user *) DM_Parg(uap,5));/* attrnamep */
9842 + break;
9843 + case DM_CREATE_BY_HANDLE:
9844 + error = dm_create_by_handle(
9845 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9846 + (void __user *) DM_Parg(uap,2), /* dirhanp */
9847 + (size_t) DM_Uarg(uap,3), /* dirhlen */
9848 + (dm_token_t) DM_Uarg(uap,4), /* token */
9849 + (void __user *) DM_Parg(uap,5), /* hanp */
9850 + (size_t) DM_Uarg(uap,6), /* hlen */
9851 + (char __user *) DM_Parg(uap,7));/* cname */
9852 + break;
9853 + case DM_CREATE_SESSION:
9854 + error = dm_create_session(
9855 + (dm_sessid_t) DM_Uarg(uap,1), /* oldsid */
9856 + (char __user *) DM_Parg(uap,2), /* sessinfop */
9857 + (dm_sessid_t __user *) DM_Parg(uap,3));/* newsidp */
9858 + break;
9859 + case DM_CREATE_USEREVENT:
9860 + error = dm_create_userevent(
9861 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9862 + (size_t) DM_Uarg(uap,2), /* msglen */
9863 + (void __user *) DM_Parg(uap,3), /* msgdatap */
9864 + (dm_token_t __user *) DM_Parg(uap,4));/* tokenp */
9865 + break;
9866 + case DM_DESTROY_SESSION:
9867 + error = dm_destroy_session(
9868 + (dm_sessid_t) DM_Uarg(uap,1));/* sid */
9869 + break;
9870 + case DM_DOWNGRADE_RIGHT:
9871 + error = dm_downgrade_right(
9872 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9873 + (void __user *) DM_Parg(uap,2), /* hanp */
9874 + (size_t) DM_Uarg(uap,3), /* hlen */
9875 + (dm_token_t) DM_Uarg(uap,4));/* token */
9876 + break;
9877 + case DM_FD_TO_HANDLE:
9878 + error = dm_fd_to_hdl(
9879 + (int) DM_Uarg(uap,1), /* fd */
9880 + (void __user *) DM_Parg(uap,2), /* hanp */
9881 + (size_t __user *) DM_Parg(uap,3));/* hlenp */
9882 + break;
9883 + case DM_FIND_EVENTMSG:
9884 + error = dm_find_eventmsg(
9885 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9886 + (dm_token_t) DM_Uarg(uap,2), /* token */
9887 + (size_t) DM_Uarg(uap,3), /* buflen */
9888 + (void __user *) DM_Parg(uap,4), /* bufp */
9889 + (size_t __user *) DM_Parg(uap,5));/* rlenp */
9890 + break;
9891 + case DM_GET_ALLOCINFO:
9892 + use_rvp = 1;
9893 + error = dm_get_allocinfo_rvp(
9894 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9895 + (void __user *) DM_Parg(uap,2), /* hanp */
9896 + (size_t) DM_Uarg(uap,3), /* hlen */
9897 + (dm_token_t) DM_Uarg(uap,4), /* token */
9898 + (dm_off_t __user *) DM_Parg(uap,5), /* offp */
9899 + (u_int) DM_Uarg(uap,6), /* nelem */
9900 + (dm_extent_t __user *) DM_Parg(uap,7), /* extentp */
9901 + (u_int __user *) DM_Parg(uap,8), /* nelemp */
9902 + &rvp);
9903 + break;
9904 + case DM_GET_BULKALL:
9905 + use_rvp = 1;
9906 + error = dm_get_bulkall_rvp(
9907 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9908 + (void __user *) DM_Parg(uap,2), /* hanp */
9909 + (size_t) DM_Uarg(uap,3), /* hlen */
9910 + (dm_token_t) DM_Uarg(uap,4), /* token */
9911 + (u_int) DM_Uarg(uap,5), /* mask */
9912 + (dm_attrname_t __user *) DM_Parg(uap,6),/* attrnamep */
9913 + (dm_attrloc_t __user *) DM_Parg(uap,7),/* locp */
9914 + (size_t) DM_Uarg(uap,8), /* buflen */
9915 + (void __user *) DM_Parg(uap,9), /* bufp */
9916 + (size_t __user *) DM_Parg(uap,10),/* rlenp */
9917 + &rvp);
9918 + break;
9919 + case DM_GET_BULKATTR:
9920 + use_rvp = 1;
9921 + error = dm_get_bulkattr_rvp(
9922 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9923 + (void __user *) DM_Parg(uap,2), /* hanp */
9924 + (size_t) DM_Uarg(uap,3), /* hlen */
9925 + (dm_token_t) DM_Uarg(uap,4), /* token */
9926 + (u_int) DM_Uarg(uap,5), /* mask */
9927 + (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */
9928 + (size_t) DM_Uarg(uap,7), /* buflen */
9929 + (void __user *) DM_Parg(uap,8), /* bufp */
9930 + (size_t __user *) DM_Parg(uap,9), /* rlenp */
9931 + &rvp);
9932 + break;
9933 + case DM_GET_CONFIG:
9934 + error = dm_get_config(
9935 + (void __user *) DM_Parg(uap,1), /* hanp */
9936 + (size_t) DM_Uarg(uap,2), /* hlen */
9937 + (dm_config_t) DM_Uarg(uap,3), /* flagname */
9938 + (dm_size_t __user *)DM_Parg(uap,4));/* retvalp */
9939 + break;
9940 + case DM_GET_CONFIG_EVENTS:
9941 + error = dm_get_config_events(
9942 + (void __user *) DM_Parg(uap,1), /* hanp */
9943 + (size_t) DM_Uarg(uap,2), /* hlen */
9944 + (u_int) DM_Uarg(uap,3), /* nelem */
9945 + (dm_eventset_t __user *) DM_Parg(uap,4),/* eventsetp */
9946 + (u_int __user *) DM_Parg(uap,5));/* nelemp */
9947 + break;
9948 + case DM_GET_DIOINFO:
9949 + error = dm_get_dioinfo(
9950 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9951 + (void __user *) DM_Parg(uap,2), /* hanp */
9952 + (size_t) DM_Uarg(uap,3), /* hlen */
9953 + (dm_token_t) DM_Uarg(uap,4), /* token */
9954 + (dm_dioinfo_t __user *)DM_Parg(uap,5));/* diop */
9955 + break;
9956 + case DM_GET_DIRATTRS:
9957 + use_rvp = 1;
9958 + error = dm_get_dirattrs_rvp(
9959 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9960 + (void __user *) DM_Parg(uap,2), /* hanp */
9961 + (size_t) DM_Uarg(uap,3), /* hlen */
9962 + (dm_token_t) DM_Uarg(uap,4), /* token */
9963 + (u_int) DM_Uarg(uap,5), /* mask */
9964 + (dm_attrloc_t __user *)DM_Parg(uap,6), /* locp */
9965 + (size_t) DM_Uarg(uap,7), /* buflen */
9966 + (void __user *) DM_Parg(uap,8), /* bufp */
9967 + (size_t __user *) DM_Parg(uap,9), /* rlenp */
9968 + &rvp);
9969 + break;
9970 + case DM_GET_DMATTR:
9971 + error = dm_get_dmattr(
9972 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9973 + (void __user *) DM_Parg(uap,2), /* hanp */
9974 + (size_t) DM_Uarg(uap,3), /* hlen */
9975 + (dm_token_t) DM_Uarg(uap,4), /* token */
9976 + (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
9977 + (size_t) DM_Uarg(uap,6), /* buflen */
9978 + (void __user *) DM_Parg(uap,7), /* bufp */
9979 + (size_t __user *) DM_Parg(uap,8));/* rlenp */
9980 +
9981 + break;
9982 + case DM_GET_EVENTLIST:
9983 + error = dm_get_eventlist(
9984 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9985 + (void __user *) DM_Parg(uap,2), /* hanp */
9986 + (size_t) DM_Uarg(uap,3), /* hlen */
9987 + (dm_token_t) DM_Uarg(uap,4), /* token */
9988 + (u_int) DM_Uarg(uap,5), /* nelem */
9989 + (dm_eventset_t __user *) DM_Parg(uap,6),/* eventsetp */
9990 + (u_int __user *) DM_Parg(uap,7));/* nelemp */
9991 + break;
9992 + case DM_GET_EVENTS:
9993 + error = dm_get_events(
9994 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
9995 + (u_int) DM_Uarg(uap,2), /* maxmsgs */
9996 + (u_int) DM_Uarg(uap,3), /* flags */
9997 + (size_t) DM_Uarg(uap,4), /* buflen */
9998 + (void __user *) DM_Parg(uap,5), /* bufp */
9999 + (size_t __user *) DM_Parg(uap,6));/* rlenp */
10000 + break;
10001 + case DM_GET_FILEATTR:
10002 + error = dm_get_fileattr(
10003 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10004 + (void __user *) DM_Parg(uap,2), /* hanp */
10005 + (size_t) DM_Uarg(uap,3), /* hlen */
10006 + (dm_token_t) DM_Uarg(uap,4), /* token */
10007 + (u_int) DM_Uarg(uap,5), /* mask */
10008 + (dm_stat_t __user *) DM_Parg(uap,6));/* statp */
10009 + break;
10010 + case DM_GET_MOUNTINFO:
10011 + error = dm_get_mountinfo(
10012 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10013 + (void __user *) DM_Parg(uap,2), /* hanp */
10014 + (size_t) DM_Uarg(uap,3), /* hlen */
10015 + (dm_token_t) DM_Uarg(uap,4), /* token */
10016 + (size_t) DM_Uarg(uap,5), /* buflen */
10017 + (void __user *) DM_Parg(uap,6), /* bufp */
10018 + (size_t __user *) DM_Parg(uap,7));/* rlenp */
10019 + break;
10020 + case DM_GET_REGION:
10021 + error = dm_get_region(
10022 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10023 + (void __user *) DM_Parg(uap,2), /* hanp */
10024 + (size_t) DM_Uarg(uap,3), /* hlen */
10025 + (dm_token_t) DM_Uarg(uap,4), /* token */
10026 + (u_int) DM_Uarg(uap,5), /* nelem */
10027 + (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */
10028 + (u_int __user *) DM_Parg(uap,7));/* nelemp */
10029 + break;
10030 + case DM_GETALL_DISP:
10031 + error = dm_getall_disp(
10032 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10033 + (size_t) DM_Uarg(uap,2), /* buflen */
10034 + (void __user *) DM_Parg(uap,3), /* bufp */
10035 + (size_t __user *) DM_Parg(uap,4));/* rlenp */
10036 + break;
10037 + case DM_GETALL_DMATTR:
10038 + error = dm_getall_dmattr(
10039 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10040 + (void __user *) DM_Parg(uap,2), /* hanp */
10041 + (size_t) DM_Uarg(uap,3), /* hlen */
10042 + (dm_token_t) DM_Uarg(uap,4), /* token */
10043 + (size_t) DM_Uarg(uap,5), /* buflen */
10044 + (void __user *) DM_Parg(uap,6), /* bufp */
10045 + (size_t __user *) DM_Parg(uap,7));/* rlenp */
10046 + break;
10047 + case DM_GETALL_INHERIT:
10048 + error = dm_getall_inherit(
10049 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10050 + (void __user *) DM_Parg(uap,2), /* hanp */
10051 + (size_t) DM_Uarg(uap,3), /* hlen */
10052 + (dm_token_t) DM_Uarg(uap,4), /* token */
10053 + (u_int) DM_Uarg(uap,5), /* nelem */
10054 + (dm_inherit_t __user *)DM_Parg(uap,6), /* inheritbufp*/
10055 + (u_int __user *) DM_Parg(uap,7));/* nelemp */
10056 + break;
10057 + case DM_GETALL_SESSIONS:
10058 + error = dm_getall_sessions(
10059 + (u_int) DM_Uarg(uap,1), /* nelem */
10060 + (dm_sessid_t __user *) DM_Parg(uap,2), /* sidbufp */
10061 + (u_int __user *) DM_Parg(uap,3));/* nelemp */
10062 + break;
10063 + case DM_GETALL_TOKENS:
10064 + error = dm_getall_tokens(
10065 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10066 + (u_int) DM_Uarg(uap,2), /* nelem */
10067 + (dm_token_t __user *) DM_Parg(uap,3), /* tokenbufp */
10068 + (u_int __user *) DM_Parg(uap,4));/* nelemp */
10069 + break;
10070 + case DM_INIT_ATTRLOC:
10071 + error = dm_init_attrloc(
10072 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10073 + (void __user *) DM_Parg(uap,2), /* hanp */
10074 + (size_t) DM_Uarg(uap,3), /* hlen */
10075 + (dm_token_t) DM_Uarg(uap,4), /* token */
10076 + (dm_attrloc_t __user *) DM_Parg(uap,5));/* locp */
10077 + break;
10078 + case DM_MKDIR_BY_HANDLE:
10079 + error = dm_mkdir_by_handle(
10080 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10081 + (void __user *) DM_Parg(uap,2), /* dirhanp */
10082 + (size_t) DM_Uarg(uap,3), /* dirhlen */
10083 + (dm_token_t) DM_Uarg(uap,4), /* token */
10084 + (void __user *) DM_Parg(uap,5), /* hanp */
10085 + (size_t) DM_Uarg(uap,6), /* hlen */
10086 + (char __user *) DM_Parg(uap,7));/* cname */
10087 + break;
10088 + case DM_MOVE_EVENT:
10089 + error = dm_move_event(
10090 + (dm_sessid_t) DM_Uarg(uap,1), /* srcsid */
10091 + (dm_token_t) DM_Uarg(uap,2), /* token */
10092 + (dm_sessid_t) DM_Uarg(uap,3), /* targetsid */
10093 + (dm_token_t __user *) DM_Parg(uap,4));/* rtokenp */
10094 + break;
10095 + case DM_OBJ_REF_HOLD:
10096 + error = dm_obj_ref_hold(
10097 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10098 + (dm_token_t) DM_Uarg(uap,2), /* token */
10099 + (void __user *) DM_Parg(uap,3), /* hanp */
10100 + (size_t) DM_Uarg(uap,4));/* hlen */
10101 + break;
10102 + case DM_OBJ_REF_QUERY:
10103 + use_rvp = 1;
10104 + error = dm_obj_ref_query_rvp(
10105 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10106 + (dm_token_t) DM_Uarg(uap,2), /* token */
10107 + (void __user *) DM_Parg(uap,3), /* hanp */
10108 + (size_t) DM_Uarg(uap,4), /* hlen */
10109 + &rvp);
10110 + break;
10111 + case DM_OBJ_REF_RELE:
10112 + error = dm_obj_ref_rele(
10113 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10114 + (dm_token_t) DM_Uarg(uap,2), /* token */
10115 + (void __user *) DM_Parg(uap,3), /* hanp */
10116 + (size_t) DM_Uarg(uap,4));/* hlen */
10117 + break;
10118 + case DM_PATH_TO_FSHANDLE:
10119 + error = dm_path_to_fshdl(
10120 + (char __user *) DM_Parg(uap,1), /* path */
10121 + (void __user *) DM_Parg(uap,2), /* hanp */
10122 + (size_t __user *) DM_Parg(uap,3));/* hlenp */
10123 + break;
10124 + case DM_PATH_TO_HANDLE:
10125 + error = dm_path_to_hdl(
10126 + (char __user *) DM_Parg(uap,1), /* path */
10127 + (void __user *) DM_Parg(uap,2), /* hanp */
10128 + (size_t __user *) DM_Parg(uap,3));/* hlenp */
10129 + break;
10130 + case DM_PENDING:
10131 + error = dm_pending(
10132 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10133 + (dm_token_t) DM_Uarg(uap,2), /* token */
10134 + (dm_timestruct_t __user *) DM_Parg(uap,3));/* delay */
10135 + break;
10136 + case DM_PROBE_HOLE:
10137 + error = dm_probe_hole(
10138 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10139 + (void __user *) DM_Parg(uap,2), /* hanp */
10140 + (size_t) DM_Uarg(uap,3), /* hlen */
10141 + (dm_token_t) DM_Uarg(uap,4), /* token */
10142 + (dm_off_t) DM_Uarg(uap,5), /* off */
10143 + (dm_size_t) DM_Uarg(uap,6), /* len */
10144 + (dm_off_t __user *) DM_Parg(uap,7), /* roffp */
10145 + (dm_size_t __user *) DM_Parg(uap,8));/* rlenp */
10146 + break;
10147 + case DM_PUNCH_HOLE:
10148 + error = dm_punch_hole(
10149 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10150 + (void __user *) DM_Parg(uap,2), /* hanp */
10151 + (size_t) DM_Uarg(uap,3), /* hlen */
10152 + (dm_token_t) DM_Uarg(uap,4), /* token */
10153 + (dm_off_t) DM_Uarg(uap,5), /* off */
10154 + (dm_size_t) DM_Uarg(uap,6));/* len */
10155 + break;
10156 + case DM_QUERY_RIGHT:
10157 + error = dm_query_right(
10158 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10159 + (void __user *) DM_Parg(uap,2), /* hanp */
10160 + (size_t) DM_Uarg(uap,3), /* hlen */
10161 + (dm_token_t) DM_Uarg(uap,4), /* token */
10162 + (dm_right_t __user *) DM_Parg(uap,5));/* rightp */
10163 + break;
10164 + case DM_QUERY_SESSION:
10165 + error = dm_query_session(
10166 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10167 + (size_t) DM_Uarg(uap,2), /* buflen */
10168 + (void __user *) DM_Parg(uap,3), /* bufp */
10169 + (size_t __user *) DM_Parg(uap,4));/* rlenp */
10170 + break;
10171 + case DM_READ_INVIS:
10172 + use_rvp = 1;
10173 + error = dm_read_invis_rvp(
10174 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10175 + (void __user *) DM_Parg(uap,2), /* hanp */
10176 + (size_t) DM_Uarg(uap,3), /* hlen */
10177 + (dm_token_t) DM_Uarg(uap,4), /* token */
10178 + (dm_off_t) DM_Uarg(uap,5), /* off */
10179 + (dm_size_t) DM_Uarg(uap,6), /* len */
10180 + (void __user *) DM_Parg(uap,7), /* bufp */
10181 + &rvp);
10182 + break;
10183 + case DM_RELEASE_RIGHT:
10184 + error = dm_release_right(
10185 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10186 + (void __user *) DM_Parg(uap,2), /* hanp */
10187 + (size_t) DM_Uarg(uap,3), /* hlen */
10188 + (dm_token_t) DM_Uarg(uap,4));/* token */
10189 + break;
10190 + case DM_REMOVE_DMATTR:
10191 + error = dm_remove_dmattr(
10192 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10193 + (void __user *) DM_Parg(uap,2), /* hanp */
10194 + (size_t) DM_Uarg(uap,3), /* hlen */
10195 + (dm_token_t) DM_Uarg(uap,4), /* token */
10196 + (int) DM_Uarg(uap,5), /* setdtime */
10197 + (dm_attrname_t __user *) DM_Parg(uap,6));/* attrnamep */
10198 + break;
10199 + case DM_REQUEST_RIGHT:
10200 + error = dm_request_right(
10201 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10202 + (void __user *) DM_Parg(uap,2), /* hanp */
10203 + (size_t) DM_Uarg(uap,3), /* hlen */
10204 + (dm_token_t) DM_Uarg(uap,4), /* token */
10205 + (u_int) DM_Uarg(uap,5), /* flags */
10206 + (dm_right_t) DM_Uarg(uap,6));/* right */
10207 + break;
10208 + case DM_RESPOND_EVENT:
10209 + error = dm_respond_event(
10210 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10211 + (dm_token_t) DM_Uarg(uap,2), /* token */
10212 + (dm_response_t) DM_Uarg(uap,3), /* response */
10213 + (int) DM_Uarg(uap,4), /* reterror */
10214 + (size_t) DM_Uarg(uap,5), /* buflen */
10215 + (void __user *) DM_Parg(uap,6));/* respbufp */
10216 + break;
10217 + case DM_SEND_MSG:
10218 + error = dm_send_msg(
10219 + (dm_sessid_t) DM_Uarg(uap,1), /* targetsid */
10220 + (dm_msgtype_t) DM_Uarg(uap,2), /* msgtype */
10221 + (size_t) DM_Uarg(uap,3), /* buflen */
10222 + (void __user *) DM_Parg(uap,4));/* bufp */
10223 + break;
10224 + case DM_SET_DISP:
10225 + error = dm_set_disp(
10226 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10227 + (void __user *) DM_Parg(uap,2), /* hanp */
10228 + (size_t) DM_Uarg(uap,3), /* hlen */
10229 + (dm_token_t) DM_Uarg(uap,4), /* token */
10230 + (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */
10231 + (u_int) DM_Uarg(uap,6));/* maxevent */
10232 + break;
10233 + case DM_SET_DMATTR:
10234 + error = dm_set_dmattr(
10235 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10236 + (void __user *) DM_Parg(uap,2), /* hanp */
10237 + (size_t) DM_Uarg(uap,3), /* hlen */
10238 + (dm_token_t) DM_Uarg(uap,4), /* token */
10239 + (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
10240 + (int) DM_Uarg(uap,6), /* setdtime */
10241 + (size_t) DM_Uarg(uap,7), /* buflen */
10242 + (void __user *) DM_Parg(uap,8));/* bufp */
10243 + break;
10244 + case DM_SET_EVENTLIST:
10245 + error = dm_set_eventlist(
10246 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10247 + (void __user *) DM_Parg(uap,2), /* hanp */
10248 + (size_t) DM_Uarg(uap,3), /* hlen */
10249 + (dm_token_t) DM_Uarg(uap,4), /* token */
10250 + (dm_eventset_t __user *) DM_Parg(uap,5),/* eventsetp */
10251 + (u_int) DM_Uarg(uap,6));/* maxevent */
10252 + break;
10253 + case DM_SET_FILEATTR:
10254 + error = dm_set_fileattr(
10255 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10256 + (void __user *) DM_Parg(uap,2), /* hanp */
10257 + (size_t) DM_Uarg(uap,3), /* hlen */
10258 + (dm_token_t) DM_Uarg(uap,4), /* token */
10259 + (u_int) DM_Uarg(uap,5), /* mask */
10260 + (dm_fileattr_t __user *)DM_Parg(uap,6));/* attrp */
10261 + break;
10262 + case DM_SET_INHERIT:
10263 + error = dm_set_inherit(
10264 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10265 + (void __user *) DM_Parg(uap,2), /* hanp */
10266 + (size_t) DM_Uarg(uap,3), /* hlen */
10267 + (dm_token_t) DM_Uarg(uap,4), /* token */
10268 + (dm_attrname_t __user *)DM_Parg(uap,5),/* attrnamep */
10269 + (mode_t) DM_Uarg(uap,6));/* mode */
10270 + break;
10271 + case DM_SET_REGION:
10272 + error = dm_set_region(
10273 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10274 + (void __user *) DM_Parg(uap,2), /* hanp */
10275 + (size_t) DM_Uarg(uap,3), /* hlen */
10276 + (dm_token_t) DM_Uarg(uap,4), /* token */
10277 + (u_int) DM_Uarg(uap,5), /* nelem */
10278 + (dm_region_t __user *) DM_Parg(uap,6), /* regbufp */
10279 + (dm_boolean_t __user *) DM_Parg(uap,7));/* exactflagp */
10280 + break;
10281 + case DM_SET_RETURN_ON_DESTROY:
10282 + error = dm_set_return_on_destroy(
10283 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10284 + (void __user *) DM_Parg(uap,2), /* hanp */
10285 + (size_t) DM_Uarg(uap,3), /* hlen */
10286 + (dm_token_t) DM_Uarg(uap,4), /* token */
10287 + (dm_attrname_t __user *) DM_Parg(uap,5),/* attrnamep */
10288 + (dm_boolean_t) DM_Uarg(uap,6));/* enable */
10289 + break;
10290 + case DM_SYMLINK_BY_HANDLE:
10291 + error = dm_symlink_by_handle(
10292 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10293 + (void __user *) DM_Parg(uap,2), /* dirhanp */
10294 + (size_t) DM_Uarg(uap,3), /* dirhlen */
10295 + (dm_token_t) DM_Uarg(uap,4), /* token */
10296 + (void __user *) DM_Parg(uap,5), /* hanp */
10297 + (size_t) DM_Uarg(uap,6), /* hlen */
10298 + (char __user *) DM_Parg(uap,7), /* cname */
10299 + (char __user *) DM_Parg(uap,8));/* path */
10300 + break;
10301 + case DM_SYNC_BY_HANDLE:
10302 + error = dm_sync_by_handle(
10303 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10304 + (void __user *) DM_Parg(uap,2), /* hanp */
10305 + (size_t) DM_Uarg(uap,3), /* hlen */
10306 + (dm_token_t) DM_Uarg(uap,4));/* token */
10307 + break;
10308 + case DM_UPGRADE_RIGHT:
10309 + error = dm_upgrade_right(
10310 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10311 + (void __user *) DM_Parg(uap,2), /* hanp */
10312 + (size_t) DM_Uarg(uap,3), /* hlen */
10313 + (dm_token_t) DM_Uarg(uap,4));/* token */
10314 + break;
10315 + case DM_WRITE_INVIS:
10316 + use_rvp = 1;
10317 + error = dm_write_invis_rvp(
10318 + (dm_sessid_t) DM_Uarg(uap,1), /* sid */
10319 + (void __user *) DM_Parg(uap,2), /* hanp */
10320 + (size_t) DM_Uarg(uap,3), /* hlen */
10321 + (dm_token_t) DM_Uarg(uap,4), /* token */
10322 + (int) DM_Uarg(uap,5), /* flags */
10323 + (dm_off_t) DM_Uarg(uap,6), /* off */
10324 + (dm_size_t) DM_Uarg(uap,7), /* len */
10325 + (void __user *) DM_Parg(uap,8), /* bufp */
10326 + &rvp);
10327 + break;
10328 + case DM_OPEN_BY_HANDLE:
10329 + use_rvp = 1;
10330 + error = dm_open_by_handle_rvp(
10331 + (unsigned int) DM_Uarg(uap,1), /* fd */
10332 + (void __user *) DM_Parg(uap,2), /* hanp */
10333 + (size_t) DM_Uarg(uap,3), /* hlen */
10334 + (int) DM_Uarg(uap,4), /* flags */
10335 + &rvp);
10336 + break;
10337 + default:
10338 + error = -ENOSYS;
10339 + break;
10340 + }
10341 +
10342 + lock_kernel();
10343 +
10344 + /* If it was an *_rvp() function, then
10345 + if error==0, return |rvp|
10346 + */
10347 + if( use_rvp && (error == 0) )
10348 + return rvp;
10349 + else
10350 + return error;
10351 +}
10352 +
10353 +
10354 +
10355 +static int
10356 +dmapi_open(struct inode *inode, struct file *file)
10357 +{
10358 + return 0;
10359 +}
10360 +
10361 +
10362 +static int
10363 +dmapi_release(struct inode *inode, struct file *file)
10364 +{
10365 + return 0;
10366 +}
10367 +
10368 +
10369 +/* say hello, and let me know the device is hooked up */
10370 +static ssize_t
10371 +dmapi_dump(struct file *file, char __user *buf, size_t count, loff_t *ppos)
10372 +{
10373 + char tmp[50];
10374 + int len;
10375 + if( *ppos == 0 ){
10376 + len = sprintf( tmp, "# " DM_VER_STR_CONTENTS "\n" );
10377 + if( copy_to_user(buf, tmp, len) )
10378 + return -EFAULT;
10379 + *ppos += 1;
10380 + return len;
10381 + }
10382 + return 0;
10383 +}
10384 +
10385 +static struct file_operations dmapi_fops = {
10386 + .open = dmapi_open,
10387 + .ioctl = dmapi_ioctl,
10388 + .read = dmapi_dump,
10389 + .release = dmapi_release
10390 +};
10391 +
10392 +static struct miscdevice dmapi_dev = {
10393 + .minor = MISC_DYNAMIC_MINOR,
10394 + .name = "dmapi",
10395 + .fops = &dmapi_fops
10396 +};
10397 +
10398 +
10399 +
10400 +#ifdef CONFIG_PROC_FS
10401 +static int
10402 +dmapi_summary(char *buffer, char **start, off_t offset,
10403 + int count, int *eof, void *data)
10404 +{
10405 + int len;
10406 +
10407 + extern u_int dm_sessions_active;
10408 + extern dm_sessid_t dm_next_sessid;
10409 + extern dm_token_t dm_next_token;
10410 + extern dm_sequence_t dm_next_sequence;
10411 + extern int dm_fsys_cnt;
10412 +
10413 +#define CHKFULL if(len >= count) break;
10414 +#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
10415 +
10416 + len=0;
10417 + while(1){
10418 + ADDBUF("dm_sessions_active=%u\n", dm_sessions_active);
10419 + ADDBUF("dm_next_sessid=%d\n", (int)dm_next_sessid);
10420 + ADDBUF("dm_next_token=%d\n", (int)dm_next_token);
10421 + ADDBUF("dm_next_sequence=%u\n", (u_int)dm_next_sequence);
10422 + ADDBUF("dm_fsys_cnt=%d\n", dm_fsys_cnt);
10423 +
10424 + break;
10425 + }
10426 +
10427 + if (offset >= len) {
10428 + *start = buffer;
10429 + *eof = 1;
10430 + return 0;
10431 + }
10432 + *start = buffer + offset;
10433 + if ((len -= offset) > count)
10434 + return count;
10435 + *eof = 1;
10436 +
10437 + return len;
10438 +}
10439 +#endif
10440 +
10441 +
10442 +static void __init
10443 +dmapi_init_procfs(int dmapi_minor)
10444 +{
10445 +#ifdef CONFIG_PROC_FS
10446 + struct proc_dir_entry *entry;
10447 +
10448 + if ((entry = proc_mkdir( DMAPI_DBG_PROCFS, NULL)) == NULL )
10449 + return;
10450 + entry->owner = THIS_MODULE;
10451 + entry->mode = S_IFDIR | S_IRUSR | S_IXUSR;
10452 +
10453 + if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/fsreg", NULL)) == NULL )
10454 + return;
10455 + entry->owner = THIS_MODULE;
10456 +
10457 + if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/sessions", NULL)) == NULL )
10458 + return;
10459 + entry->owner = THIS_MODULE;
10460 +
10461 + entry = create_proc_read_entry( DMAPI_DBG_PROCFS "/summary",
10462 + 0, NULL, dmapi_summary, NULL);
10463 + entry->owner = THIS_MODULE;
10464 +#endif
10465 +}
10466 +
10467 +#if 0
10468 +static void __exit
10469 +dmapi_cleanup_procfs(void)
10470 +{
10471 +#ifdef CONFIG_PROC_FS
10472 + remove_proc_entry( DMAPI_DBG_PROCFS "/summary", NULL);
10473 + remove_proc_entry( DMAPI_DBG_PROCFS "/fsreg", NULL);
10474 + remove_proc_entry( DMAPI_DBG_PROCFS "/sessions", NULL);
10475 + remove_proc_entry( DMAPI_DBG_PROCFS, NULL);
10476 +#endif
10477 +}
10478 +#endif
10479 +
10480 +int __init dmapi_init(void)
10481 +{
10482 + int ret;
10483 +
10484 + dm_tokdata_cachep = kmem_cache_create("dm_tokdata",
10485 + sizeof(struct dm_tokdata), 0, 0, NULL);
10486 + if (dm_tokdata_cachep == NULL)
10487 + goto out;
10488 +
10489 + dm_fsreg_cachep = kmem_cache_create("dm_fsreg",
10490 + sizeof(struct dm_fsreg), 0, 0, NULL);
10491 + if (dm_fsreg_cachep == NULL)
10492 + goto out_free_tokdata_cachep;
10493 +
10494 + dm_session_cachep = kmem_cache_create("dm_session",
10495 + sizeof(struct dm_session), 0, 0, NULL);
10496 + if (dm_session_cachep == NULL)
10497 + goto out_free_fsreg_cachep;
10498 +
10499 + dm_fsys_map_cachep = kmem_cache_create("dm_fsys_map",
10500 + sizeof(dm_vector_map_t), 0, 0, NULL);
10501 + if (dm_fsys_map_cachep == NULL)
10502 + goto out_free_session_cachep;
10503 + dm_fsys_vptr_cachep = kmem_cache_create("dm_fsys_vptr",
10504 + sizeof(dm_fsys_vector_t), 0, 0, NULL);
10505 + if (dm_fsys_vptr_cachep == NULL)
10506 + goto out_free_fsys_map_cachep;
10507 +
10508 + ret = misc_register(&dmapi_dev);
10509 + if (ret) {
10510 + printk(KERN_ERR "dmapi_init: misc_register returned %d\n", ret);
10511 + goto out_free_fsys_vptr_cachep;
10512 + }
10513 +
10514 + dmapi_init_procfs(dmapi_dev.minor);
10515 + return 0;
10516 +
10517 + out_free_fsys_vptr_cachep:
10518 + kmem_cache_destroy(dm_fsys_vptr_cachep);
10519 + out_free_fsys_map_cachep:
10520 + kmem_cache_destroy(dm_fsys_map_cachep);
10521 + out_free_session_cachep:
10522 + kmem_cache_destroy(dm_session_cachep);
10523 + out_free_fsreg_cachep:
10524 + kmem_cache_destroy(dm_fsreg_cachep);
10525 + out_free_tokdata_cachep:
10526 + kmem_cache_destroy(dm_tokdata_cachep);
10527 + out:
10528 + return -ENOMEM;
10529 +}
10530 +
10531 +#if 0
10532 +void __exit dmapi_uninit(void)
10533 +{
10534 + misc_deregister(&dmapi_dev);
10535 + dmapi_cleanup_procfs();
10536 + kmem_cache_destroy(dm_tokdata_cachep);
10537 + kmem_cache_destroy(dm_fsreg_cachep);
10538 + kmem_cache_destroy(dm_session_cachep);
10539 + kmem_cache_destroy(dm_fsys_map_cachep);
10540 + kmem_cache_destroy(dm_fsys_vptr_cachep);
10541 +}
10542 +#endif
10543 +
10544 +module_init(dmapi_init);
10545 +/*module_exit(dmapi_uninit);*/ /* Some other day */
10546 +
10547 +MODULE_AUTHOR("Silicon Graphics, Inc.");
10548 +MODULE_DESCRIPTION("SGI Data Migration Subsystem");
10549 +MODULE_LICENSE("GPL");
10550 +
10551 +EXPORT_SYMBOL(dm_send_mount_event);
10552 +EXPORT_SYMBOL(dm_send_namesp_event);
10553 +EXPORT_SYMBOL(dm_send_unmount_event);
10554 +EXPORT_SYMBOL(dm_send_data_event);
10555 +EXPORT_SYMBOL(dm_send_destroy_event);
10556 +EXPORT_SYMBOL(dm_ip_to_handle);
10557 +EXPORT_SYMBOL(dmapi_register);
10558 +EXPORT_SYMBOL(dmapi_unregister);
10559 +EXPORT_SYMBOL(dmapi_registered);
10560 +EXPORT_SYMBOL(dm_release_threads);
10561 Index: linux-2.6.26/fs/dmapi/Makefile
10562 ===================================================================
10563 --- /dev/null
10564 +++ linux-2.6.26/fs/dmapi/Makefile
10565 @@ -0,0 +1,53 @@
10566 +#
10567 +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
10568 +#
10569 +# This program is free software; you can redistribute it and/or modify it
10570 +# under the terms of version 2 of the GNU General Public License as
10571 +# published by the Free Software Foundation.
10572 +#
10573 +# This program is distributed in the hope that it would be useful, but
10574 +# WITHOUT ANY WARRANTY; without even the implied warranty of
10575 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10576 +#
10577 +# Further, this software is distributed without any warranty that it is
10578 +# free of the rightful claim of any third person regarding infringement
10579 +# or the like. Any license provided herein, whether implied or
10580 +# otherwise, applies only to this software file. Patent licenses, if
10581 +# any, provided herein do not apply to combinations of this program with
10582 +# other software, or any other product whatsoever.
10583 +#
10584 +# You should have received a copy of the GNU General Public License along
10585 +# with this program; if not, write the Free Software Foundation, Inc., 59
10586 +# Temple Place - Suite 330, Boston MA 02111-1307, USA.
10587 +#
10588 +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
10589 +# Mountain View, CA 94043, or:
10590 +#
10591 +# http://www.sgi.com
10592 +#
10593 +# For further information regarding this notice, see:
10594 +#
10595 +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
10596 +#
10597 +
10598 +ifeq ($(CONFIG_DMAPI_DEBUG),y)
10599 + EXTRA_CFLAGS += -DDEBUG
10600 + EXTRA_CFLAGS += -g
10601 +endif
10602 +
10603 +obj-$(CONFIG_DMAPI) += dmapi.o
10604 +
10605 +dmapi-y += dmapi_sysent.o \
10606 + dmapi_attr.o \
10607 + dmapi_config.o \
10608 + dmapi_bulkattr.o \
10609 + dmapi_dmattr.o \
10610 + dmapi_event.o \
10611 + dmapi_handle.o \
10612 + dmapi_hole.o \
10613 + dmapi_io.o \
10614 + dmapi_mountinfo.o \
10615 + dmapi_region.o \
10616 + dmapi_register.o \
10617 + dmapi_right.o \
10618 + dmapi_session.o
10619 Index: linux-2.6.26/fs/dmapi/Status
10620 ===================================================================
10621 --- /dev/null
10622 +++ linux-2.6.26/fs/dmapi/Status
10623 @@ -0,0 +1,128 @@
10624 +Jan21,04 - dm_get_bulkall is now implemented. roehrich
10625 +
10626 +for linux:
10627 +
10628 +
10629 +68 external interfaces in libdm
10630 +
10631 + 56 of those interfaces go through to dmi(), the kernel side of DMAPI
10632 +
10633 +
10634 +
10635 +Functions known to work
10636 +----------------------------------------------
10637 +
10638 +dm_create_session
10639 +dm_create_userevent
10640 +dm_destroy_session
10641 +dm_getall_sessions
10642 +dm_getall_tokens
10643 +dm_get_allocinfo
10644 +dm_get_bulkall
10645 +dm_get_bulkattr
10646 +dm_get_config_events
10647 +dm_get_dmattr
10648 +dm_get_eventlist
10649 +dm_get_events
10650 +dm_get_fileattr
10651 +dm_get_region
10652 +dm_handle_free
10653 +dm_init_attrloc
10654 +dm_init_service
10655 +dm_obj_ref_hold
10656 +dm_obj_ref_query
10657 +dm_obj_ref_rele
10658 +dm_path_to_fshandle
10659 +dm_path_to_handle
10660 +dm_punch_hole
10661 +dm_query_session
10662 +dm_read_invis
10663 +dm_remove_dmattr
10664 +dm_respond_event
10665 +dm_send_msg
10666 +dm_set_disp
10667 +dm_set_dmattr
10668 +dm_set_eventlist
10669 +dm_set_fileattr
10670 +dm_set_region
10671 +dm_sync_by_handle
10672 +dm_write_invis
10673 +35
10674 +
10675 +Functions that seem to work (would like more rigorous test case)
10676 +------------------------------------------
10677 +
10678 +dm_pending
10679 +dm_probe_hole - one test case of test_hole.c fails
10680 +dm_request_right
10681 +3
10682 +
10683 +Functions untested but probably work
10684 +----------------------------------------------
10685 +
10686 +dm_find_eventmsg
10687 +dm_handle_cmp
10688 +dm_handle_to_fshandle
10689 +dm_handle_to_ino
10690 +dm_release_right
10691 +5
10692 +
10693 +Functions that do not work
10694 +-----------------------------------------
10695 +
10696 +dm_get_dioinfo - directio not implemented
10697 +1
10698 +
10699 +Functions not supported in SGI DMAPI
10700 +-------------------------------------------------------------
10701 +
10702 +dm_clear_inherit
10703 +dm_create_by_handle
10704 +dm_getall_inherit
10705 +dm_mkdir_by_handle
10706 +dm_set_inherit
10707 +dm_symlink_by_handle
10708 +
10709 +
10710 +
10711 +
10712 +Functions that seem to work (would like more rigorous test case)
10713 +----------------------------------------------------------------
10714 +
10715 +dm_get_config
10716 +dm_downgrade_right
10717 +dm_get_mountinfo
10718 +dm_set_return_on_destroy
10719 +dm_upgrade_right
10720 +
10721 +
10722 +
10723 +Functions that do not work
10724 +-----------------------------------------------------------------
10725 +
10726 +dm_fd_to_handle - Irix getf not implemented on linux
10727 +dm_get_dirattrs - null pointer reference
10728 +dm_handle_to_path
10729 +dm_getall_dmattr - needs a copy_from_user in place of useracc
10730 +
10731 +
10732 +Functions that are untested, but probably work
10733 +-----------------------------------------------------------------
10734 +
10735 +dm_getall_disp
10736 +dm_handle_hash
10737 +dm_handle_is_valid
10738 +dm_handle_to_fsid
10739 +dm_handle_to_igen
10740 +dm_make_fshandle
10741 +dm_make_handle
10742 +dm_move_event
10743 +dm_query_right
10744 +
10745 +
10746 +
10747 +Other things not working
10748 +----------------------------------
10749 +
10750 +- read/write events for memory-mapped I/O?
10751 +
10752 Index: linux-2.6.26/fs/dmapi/sv.h
10753 ===================================================================
10754 --- /dev/null
10755 +++ linux-2.6.26/fs/dmapi/sv.h
10756 @@ -0,0 +1,89 @@
10757 +/*
10758 + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
10759 + *
10760 + * This program is free software; you can redistribute it and/or modify it
10761 + * under the terms of version 2 of the GNU General Public License as
10762 + * published by the Free Software Foundation.
10763 + *
10764 + * This program is distributed in the hope that it would be useful, but
10765 + * WITHOUT ANY WARRANTY; without even the implied warranty of
10766 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10767 + *
10768 + * Further, this software is distributed without any warranty that it is
10769 + * free of the rightful claim of any third person regarding infringement
10770 + * or the like. Any license provided herein, whether implied or
10771 + * otherwise, applies only to this software file. Patent licenses, if
10772 + * any, provided herein do not apply to combinations of this program with
10773 + * other software, or any other product whatsoever.
10774 + *
10775 + * You should have received a copy of the GNU General Public License along
10776 + * with this program; if not, write the Free Software Foundation, Inc., 59
10777 + * Temple Place - Suite 330, Boston MA 02111-1307, USA.
10778 + *
10779 + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
10780 + * Mountain View, CA 94043, or:
10781 + *
10782 + * http://www.sgi.com
10783 + *
10784 + * For further information regarding this notice, see:
10785 + *
10786 + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
10787 + */
10788 +#ifndef __DMAPI_SV_H__
10789 +#define __DMAPI_SV_H__
10790 +
10791 +#include <linux/wait.h>
10792 +#include <linux/sched.h>
10793 +#include <linux/spinlock.h>
10794 +
10795 +/*
10796 + * Synchronisation variables.
10797 + *
10798 + * (Parameters "pri", "svf" and "rts" are not implemented)
10799 + */
10800 +
10801 +typedef struct sv_s {
10802 + wait_queue_head_t waiters;
10803 +} sv_t;
10804 +
10805 +#define SV_FIFO 0x0 /* sv_t is FIFO type */
10806 +#define SV_LIFO 0x2 /* sv_t is LIFO type */
10807 +#define SV_PRIO 0x4 /* sv_t is PRIO type */
10808 +#define SV_KEYED 0x6 /* sv_t is KEYED type */
10809 +#define SV_DEFAULT SV_FIFO
10810 +
10811 +
10812 +static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
10813 + unsigned long timeout)
10814 +{
10815 + DECLARE_WAITQUEUE(wait, current);
10816 +
10817 + add_wait_queue_exclusive(&sv->waiters, &wait);
10818 + __set_current_state(state);
10819 + spin_unlock(lock);
10820 +
10821 + schedule_timeout(timeout);
10822 +
10823 + remove_wait_queue(&sv->waiters, &wait);
10824 +}
10825 +
10826 +#define init_sv(sv,type,name,flag) \
10827 + init_waitqueue_head(&(sv)->waiters)
10828 +#define sv_init(sv,flag,name) \
10829 + init_waitqueue_head(&(sv)->waiters)
10830 +#define sv_destroy(sv) \
10831 + /*NOTHING*/
10832 +#define sv_wait(sv, pri, lock, s) \
10833 + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
10834 +#define sv_wait_sig(sv, pri, lock, s) \
10835 + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
10836 +#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
10837 + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
10838 +#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
10839 + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
10840 +#define sv_signal(sv) \
10841 + wake_up(&(sv)->waiters)
10842 +#define sv_broadcast(sv) \
10843 + wake_up_all(&(sv)->waiters)
10844 +
10845 +#endif /* __DMAPI_SV_H__ */