1 Index: linux-2.6.33/drivers/staging/Kconfig
2 ===================================================================
3 --- linux-2.6.33.orig/drivers/staging/Kconfig
4 +++ linux-2.6.33/drivers/staging/Kconfig
5 @@ -141,5 +141,7 @@ source "drivers/staging/netwave/Kconfig"
6
7 source "drivers/staging/sm7xx/Kconfig"
8
9 +source "drivers/staging/rar_register/Kconfig"
10 +
11 endif # !STAGING_EXCLUDE_BUILD
12 endif # STAGING
13 Index: linux-2.6.33/drivers/staging/Makefile
14 ===================================================================
15 --- linux-2.6.33.orig/drivers/staging/Makefile
16 +++ linux-2.6.33/drivers/staging/Makefile
17 @@ -38,7 +38,7 @@ obj-$(CONFIG_VT6656) += vt6656/
18 obj-$(CONFIG_FB_UDL) += udlfb/
19 obj-$(CONFIG_HYPERV) += hv/
20 obj-$(CONFIG_VME_BUS) += vme/
21 -obj-$(CONFIG_RAR_REGISTER) += rar/
22 +obj-$(CONFIG_RAR_DRIVER) += rar/
23 obj-$(CONFIG_DX_SEP) += sep/
24 obj-$(CONFIG_IIO) += iio/
25 obj-$(CONFIG_RAMZSWAP) += ramzswap/
26 @@ -52,3 +52,4 @@ obj-$(CONFIG_WAVELAN) += wavelan/
27 obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
28 obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
29 obj-$(CONFIG_FB_SM7XX) += sm7xx/
30 +obj-$(CONFIG_RAR_REGISTER) += rar_register/
31 Index: linux-2.6.33/drivers/staging/rar_register/Kconfig
32 ===================================================================
33 --- /dev/null
34 +++ linux-2.6.33/drivers/staging/rar_register/Kconfig
35 @@ -0,0 +1,14 @@
36 +#
37 +# RAR register driver configuration
38 +#
39 +
40 +menu "RAR Register Driver"
41 +
42 +config RAR_REGISTER
43 + tristate "Intel Restricted Access Region Register Driver"
44 + default n
45 + ---help---
46 + This driver allows other kernel drivers access to the
47 + contents of the restricted access region registers.
48 +
49 +endmenu
50 Index: linux-2.6.33/drivers/staging/rar_register/Makefile
51 ===================================================================
52 --- /dev/null
53 +++ linux-2.6.33/drivers/staging/rar_register/Makefile
54 @@ -0,0 +1,3 @@
55 +EXTRA_CFLAGS += -DLITTLE__ENDIAN
56 +obj-$(CONFIG_RAR_REGISTER) += rar_register.o
57 +rar_register_driver-objs := rar_register.o
58 Index: linux-2.6.33/drivers/staging/rar_register/rar_register.c
59 ===================================================================
60 --- /dev/null
61 +++ linux-2.6.33/drivers/staging/rar_register/rar_register.c
62 @@ -0,0 +1,669 @@
63 +/*
64 + * rar_register.c - An Intel Restricted Access Region register driver
65 + *
66 + * Copyright(c) 2009 Intel Corporation. All rights reserved.
67 + *
68 + * This program is free software; you can redistribute it and/or
69 + * modify it under the terms of the GNU General Public License as
70 + * published by the Free Software Foundation; either version 2 of the
71 + * License, or (at your option) any later version.
72 + *
73 + * This program is distributed in the hope that it will be useful,
74 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
75 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
76 + * General Public License for more details.
77 + *
78 + * You should have received a copy of the GNU General Public License
79 + * along with this program; if not, write to the Free Software
80 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
81 + * 02111-1307, USA.
82 + *
83 + * -------------------------------------------------------------------
84 + *
85 + * 20090806 Ossama Othman <ossama.othman@intel.com>
86 + * Return zero high address if the upper 22 bits are zero.
87 + * Cleaned up checkpatch errors.
88 + * Clarified that driver is dealing with bus addresses.
89 + *
90 + * 20090702 Ossama Othman <ossama.othman@intel.com>
91 + * Removed unnecessary include directives
92 + * Cleaned up spinlocks.
93 + * Cleaned up logging.
94 + * Improved invalid parameter checks.
95 + * Fixed and simplified RAR address retrieval and RAR locking
96 + * code.
97 + *
98 + * 20090626 Mark Allyn <mark.a.allyn@intel.com>
99 + * Initial publish
100 + */
101 +
102 +#include <linux/rar/rar_register.h>
103 +#include <linux/rar/memrar.h>
104 +
105 +#include <linux/module.h>
106 +#include <linux/pci.h>
107 +#include <linux/spinlock.h>
108 +#include <linux/device.h>
109 +#include <linux/kernel.h>
110 +
111 +
112 +/* PCI vendor id for controller */
113 +#define VENDOR_ID 0x8086
114 +
115 +/* PCI device id for controller */
116 +#define DEVICE_ID 0x4110
117 +
118 +
119 +/* === Lincroft Message Bus Interface === */
120 +/* Message Control Register */
121 +#define LNC_MCR_OFFSET 0xD0
122 +
123 +/* Message Data Register */
124 +#define LNC_MDR_OFFSET 0xD4
125 +
126 +/* Message Opcodes */
127 +#define LNC_MESSAGE_READ_OPCODE 0xD0
128 +#define LNC_MESSAGE_WRITE_OPCODE 0xE0
129 +
130 +/* Message Write Byte Enables */
131 +#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF
132 +
133 +/* B-unit Port */
134 +#define LNC_BUNIT_PORT 0x3
135 +
136 +/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */
137 +#define LNC_BRAR0L 0x10
138 +#define LNC_BRAR0H 0x11
139 +#define LNC_BRAR1L 0x12
140 +#define LNC_BRAR1H 0x13
141 +
142 +/* Reserved for SeP */
143 +#define LNC_BRAR2L 0x14
144 +#define LNC_BRAR2H 0x15
145 +
146 +/* Moorestown supports three restricted access regions. */
147 +#define MRST_NUM_RAR 3
148 +
149 +
150 +/* RAR Bus Address Range */
151 +struct RAR_address_range {
152 + u32 low;
153 + u32 high;
154 +};
155 +
156 +/* Structure containing low and high RAR register offsets. */
157 +struct RAR_offsets {
158 + u32 low; /* Register offset for low RAR bus address. */
159 + u32 high; /* Register offset for high RAR bus address. */
160 +};
161 +
162 +struct RAR_client {
163 + int (*client_callback)(void *client_data);
164 + void *customer_data;
165 + int client_called;
166 + };
167 +
168 +DEFINE_SPINLOCK(rar_spinlock_lock);
169 +DEFINE_SPINLOCK(lnc_reg_lock);
170 +
171 +struct RAR_device {
172 + unsigned long rar_flags;
173 + unsigned long lnc_reg_flags;
174 + struct RAR_offsets rar_offsets[MRST_NUM_RAR];
175 + struct RAR_address_range rar_addr[MRST_NUM_RAR];
176 + struct pci_dev *rar_dev;
177 + u32 registered;
178 + };
179 +
180 +/* this platform has only one rar_device for 3 rar regions */
181 +struct RAR_device my_rar_device;
182 +
183 +/* flag to indicate whether or not this driver is registered;
184 + * this is for the entire driver and not just a device */
185 +int driver_registered;
186 +
187 +/* this data is for handling requests from other drivers which arrive
188 + * prior to this driver initializing
189 + */
190 +
191 +struct RAR_client clients[MRST_NUM_RAR];
192 +int num_clients;
193 +
194 +/* prototype for init */
195 +static int __init rar_init_handler(void);
196 +static void __exit rar_exit_handler(void);
197 +
198 +const struct pci_device_id rar_pci_id_tbl[] = {
199 + { PCI_DEVICE(VENDOR_ID, DEVICE_ID) },
200 + { 0 }
201 +};
202 +
203 +MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
204 +
205 +/*
206 + * Function that is activated on the successful probe of the RAR
207 + * device (Moorestown host controller).
208 + */
209 +static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id);
210 +
211 +/* field for registering driver to PCI device */
212 +static struct pci_driver rar_pci_driver = {
213 + .name = "rar_register",
214 + .id_table = rar_pci_id_tbl,
215 + .probe = rar_probe
216 +};
217 +
218 +const struct pci_device_id *my_id_table = rar_pci_id_tbl;
219 +
220 +/*
221 + * This function is used to retrieve RAR info using the Lincroft
222 + * message bus interface.
223 + */
224 +static int memrar_get_rar_addr(struct pci_dev *pdev,
225 + int offset,
226 + u32 *addr)
227 +{
228 + /*
229 + * ======== The Lincroft Message Bus Interface ========
230 + * Lincroft registers may be obtained from the PCI
231 + * (the Host Bridge) using the Lincroft Message Bus
232 + * Interface. That message bus interface is generally
233 + * comprised of two registers: a control register (MCR, 0xD0)
234 + * and a data register (MDR, 0xD4).
235 + *
236 + * The MCR (message control register) format is the following:
237 + * 1. [31:24]: Opcode
238 + * 2. [23:16]: Port
239 + * 3. [15:8]: Register Offset
240 + * 4. [7:4]: Byte Enables (use 0xF to set all of these bits
241 + * to 1)
242 + * 5. [3:0]: reserved
243 + *
244 + * Read (0xD0) and write (0xE0) opcodes are written to the
245 + * control register when reading and writing to Lincroft
246 + * registers, respectively.
247 + *
248 + * We're interested in registers found in the Lincroft
249 + * B-unit. The B-unit port is 0x3.
250 + *
251 + * The six B-unit RAR register offsets we use are listed
252 + * earlier in this file.
253 + *
254 + * Lastly writing to the MCR register requires the "Byte
255 + * enables" bits to be set to 1. This may be achieved by
256 + * writing 0xF at bit 4.
257 + *
258 + * The MDR (message data register) format is the following:
259 + * 1. [31:0]: Read/Write Data
260 + *
261 + * Data being read from this register is only available after
262 + * writing the appropriate control message to the MCR
263 + * register.
264 + *
265 + * Data being written to this register must be written before
266 + * writing the appropriate control message to the MCR
267 + * register.
268 + */
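+
+ /*
+ * Worked example (illustrative): a read of the B-unit register
+ * LNC_BRAR0L (offset 0x10, port 0x3) encodes the control message as
+ *
+ *   (0xD0 << 24) | (0x03 << 16) | (0x10 << 8) | (0xF << 4) = 0xD00310F0
+ *
+ * which is written to MCR; the 32-bit result is then read from MDR.
+ */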
269 +
270 + int result;
271 +
272 + /* Construct control message */
273 + u32 const message =
274 + (LNC_MESSAGE_READ_OPCODE << 24)
275 + | (LNC_BUNIT_PORT << 16)
276 + | (offset << 8)
277 + | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
278 +
279 + dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset);
280 +
281 + if (addr == 0) {
282 + WARN_ON(1);
283 + return -EINVAL;
284 + }
285 +
286 + spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
287 +
288 + /* Send the control message */
289 + result = pci_write_config_dword(pdev,
290 + LNC_MCR_OFFSET,
291 + message);
292 +
293 + dev_dbg(&pdev->dev,
294 + "Result from send ctl register is %x\n",
295 + result);
296 +
297 + if (!result) {
298 + result = pci_read_config_dword(pdev,
299 + LNC_MDR_OFFSET,
300 + addr);
301 +
302 + dev_dbg(&pdev->dev,
303 + "Result from read data register is %x\n",
304 + result);
305 +
306 + dev_dbg(&pdev->dev,
307 + "Value read from data register is %x\n",
308 + *addr);
309 + }
310 +
311 + spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
312 +
313 + return result;
314 +}
315 +
316 +static int memrar_set_rar_addr(struct pci_dev *pdev,
317 + int offset,
318 + u32 addr)
319 +{
320 + /*
321 + * Data being written to this register must be written before
322 + * writing the appropriate control message to the MCR
323 + * register.
324 + *
325 + * @note See memrar_get_rar_addr() for a description of the
326 + * message bus interface being used here.
327 + */
328 +
329 + int result = 0;
330 +
331 + /* Construct control message */
332 + u32 const message =
333 + (LNC_MESSAGE_WRITE_OPCODE << 24)
334 + | (LNC_BUNIT_PORT << 16)
335 + | (offset << 8)
336 + | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
337 +
338 + if (addr == 0) {
339 + WARN_ON(1);
340 + return -EINVAL;
341 + }
342 +
343 + spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
344 +
345 + dev_dbg(&pdev->dev,
346 + "Offset for 'set' LNC MSG is %x\n", offset);
347 +
348 + /* Send the control message */
349 + result = pci_write_config_dword(pdev,
350 + LNC_MDR_OFFSET,
351 + addr);
352 +
353 + dev_dbg(&pdev->dev,
354 + "Result from write data register is %x\n",
355 + result);
356 +
357 + if (!result) {
358 + dev_dbg(&pdev->dev,
359 + "Value written to data register is %x\n",
360 + addr);
361 +
362 + result = pci_write_config_dword(pdev,
363 + LNC_MCR_OFFSET,
364 + message);
365 +
366 + dev_dbg(&pdev->dev,
367 + "Result from send ctl register is %x\n",
368 + result);
369 + }
370 +
371 + spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
372 +
373 + return result;
374 +}
375 +
376 +/*
377 + * Initialize RAR parameters, such as bus addresses, etc.
378 + */
379 +static int memrar_init_rar_params(struct pci_dev *pdev)
380 +{
381 + struct RAR_offsets const *end = my_rar_device.rar_offsets
382 + + MRST_NUM_RAR;
383 + struct RAR_offsets const *i;
384 + struct pci_dev *my_pdev;
385 + unsigned int n = 0;
386 + int result = 0;
387 +
388 + /* Retrieve RAR start and end bus addresses. */
389 +
390 + /*
391 + * Access the RAR registers through the Lincroft Message Bus
392 + * Interface on PCI device: 00:00.0 Host bridge.
393 + */
394 +
395 + /* struct pci_dev *pdev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); */
396 +
397 + my_pdev = pci_dev_get(pdev);
398 +
399 + if (my_pdev == NULL) {
400 + WARN_ON(1);
401 + return -ENODEV;
402 + }
403 +
404 + for (i = my_rar_device.rar_offsets; i != end; ++i, ++n) {
405 + if (memrar_get_rar_addr(my_pdev,
406 + i->low,
407 + &(my_rar_device.rar_addr[n].low)) != 0
408 + || memrar_get_rar_addr(my_pdev,
409 + i->high,
410 + &(my_rar_device.rar_addr[n].high))
411 + != 0) {
412 + result = -1;
413 + break;
414 + }
415 +
416 + /*
417 + * Only the upper 22 bits of the RAR addresses are
418 + * stored in their corresponding RAR registers so we
419 + * must set the lower 10 bits accordingly.
420 + *
421 + * The low address has its lower 10 bits cleared, and
422 + * the high address has all its lower 10 bits set,
423 + * e.g.:
424 + *
425 + * low = 0x2ffffc00
426 + * high = 0x3fffffff
427 + *
428 + * This is not arbitrary, and is actually how RAR
429 + * addressing/configuration works.
430 + */
431 + my_rar_device.rar_addr[n].low &= 0xfffffc00u;
432 +
433 + /*
434 + * Set bits 9:0 if the 1 KiB aligned (the upper 22
435 + * bits) high address is non-zero.
436 + *
437 + * Otherwise set all bits to zero since that indicates
438 + * no RAR address is configured.
439 + */
440 + if ((my_rar_device.rar_addr[n].high & 0xfffffc00u) == 0)
441 + my_rar_device.rar_addr[n].high = 0;
442 + else
443 + my_rar_device.rar_addr[n].high |= 0x3ffu;
444 + }
445 +
446 + /* Done accessing the device. */
447 + /* pci_dev_put(pdev); */
448 +
449 + if (result == 0) {
450 + size_t z;
451 + for (z = 0; z != MRST_NUM_RAR; ++z) {
452 + /*
453 + * "BRAR" refers to the RAR registers in the
454 + * Lincroft B-unit.
455 + */
456 + dev_info(&pdev->dev,
457 + "BRAR[%u] bus address range = "
458 + "[0x%08x, 0x%08x]\n",
459 + z,
460 + my_rar_device.rar_addr[z].low,
461 + my_rar_device.rar_addr[z].high);
462 + }
463 + }
464 +
465 + return result;
466 +}
467 +
468 +/*
469 + * This function registers the driver with the device subsystem
470 + * (e.g. PCI, USB, etc.).
471 + */
472 +static int __init rar_init_handler(void)
473 +{
474 + return pci_register_driver(&rar_pci_driver);
475 +}
476 +
477 +static void __exit rar_exit_handler(void)
478 +{
479 + pci_unregister_driver(&rar_pci_driver);
480 +}
481 +
482 +module_init(rar_init_handler);
483 +module_exit(rar_exit_handler);
484 +
485 +MODULE_LICENSE("GPL");
486 +MODULE_DESCRIPTION("Intel Restricted Access Region Register Driver");
487 +
488 +/*
489 + * Function that is activated on the successful probe of the RAR device
490 + * (Moorestown host controller).
491 + */
492 +int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
493 +{
494 + int error;
495 + int counter;
496 +
497 + dev_dbg(&dev->dev,
498 + "PCI probe starting\n");
499 +
500 + /* enable the device */
501 + error = pci_enable_device(dev);
502 + if (error) {
503 + dev_err(&dev->dev,
504 + "Error enabling RAR register PCI device\n");
505 + goto end_function;
506 + }
507 +
508 + /* we have only one device; fill in the rar_device structure */
509 + my_rar_device.rar_dev = dev;
510 + my_rar_device.rar_flags = 0;
511 + my_rar_device.lnc_reg_flags = 0;
512 + my_rar_device.rar_offsets[0].low = LNC_BRAR0L;
513 + my_rar_device.rar_offsets[0].high = LNC_BRAR0H;
514 + my_rar_device.rar_offsets[1].low = LNC_BRAR1L;
515 + my_rar_device.rar_offsets[1].high = LNC_BRAR1H;
516 + my_rar_device.rar_offsets[2].low = LNC_BRAR2L;
517 + my_rar_device.rar_offsets[2].high = LNC_BRAR2H;
518 + my_rar_device.registered = 1;
519 +
520 + /*
521 + * Initialize the RAR parameters, which have to be retrieved
522 + * via the message bus interface.
523 + */
524 + error = memrar_init_rar_params(dev);
525 + if (error) {
526 + pci_disable_device(dev);
527 +
528 + dev_err(&dev->dev,
529 + "Error retrieving RAR addresses\n");
530 +
531 + goto end_function;
532 + }
533 +
534 + driver_registered = 1;
535 +
536 + /* now call anyone who has registered (using callbacks) */
537 + for (counter = 0; counter < num_clients; counter += 1) {
538 + if (!clients[counter].client_called) {
539 + error = (*clients[counter].client_callback)(
540 + clients[counter].customer_data);
541 + clients[counter].client_called = 1;
542 + dev_dbg(&my_rar_device.rar_dev->dev,
543 + "Callback called for %d\n",
544 + counter);
545 + }
546 + }
547 +
548 +end_function:
549 +
550 + return error;
551 +}
552 +
553 +
554 +/*
555 + * The rar_get_address function is used by other device drivers
556 + * to obtain RAR address information on a RAR. It takes three
557 + * parameters: a RAR index plus two pointers through which the start
558 + * and end bus addresses of that RAR are returned.
559 + *
560 + * int rar_index
561 + * The rar_index is an index to the RAR for which you wish to retrieve
562 + * the address information. Values can be 0, 1, or 2.
563 + *
564 + * The function returns 0 upon success or a negative error code
565 + * (e.g. -ENODEV) if there is no RAR facility on this system.
566 + */
567 +int rar_get_address(int rar_index,
568 + u32 *start_address,
569 + u32 *end_address)
570 +{
571 + int result = -ENODEV;
572 +
573 + if (my_rar_device.registered) {
574 + if (start_address == 0
575 + || end_address == 0
576 + || rar_index >= MRST_NUM_RAR
577 + || rar_index < 0) {
578 + result = -EINVAL;
579 + } else {
580 + *start_address = my_rar_device.rar_addr[rar_index].low;
581 + *end_address = my_rar_device.rar_addr[rar_index].high;
582 + result = 0;
583 + }
584 + }
585 +
586 + return result;
587 +}
588 +EXPORT_SYMBOL(rar_get_address);
589 +
590 +/*
591 + * The rar_lock function is used by other device drivers to lock a RAR.
592 + * Once a RAR is locked, it stays locked until the next system reboot.
593 + * The function takes one parameter:
594 + *
595 + * int rar_index
596 + * The rar_index is an index to the rar that you want to lock.
597 + * Values can be 0,1, or 2.
598 + *
599 + * The function returns 0 upon success or a negative error code
600 + * (e.g. -ENODEV) if there is no RAR facility on this system.
601 + */
602 +int rar_lock(int rar_index)
603 +{
604 + int result = -ENODEV;
605 +
606 + if (rar_index >= MRST_NUM_RAR || rar_index < 0) {
607 + result = -EINVAL;
608 + goto exit_rar_lock;
609 + }
610 +
611 + spin_lock_irqsave(&rar_spinlock_lock, my_rar_device.rar_flags);
612 +
613 + if (my_rar_device.registered) {
614 +
615 + u32 low;
616 + u32 high;
617 +
618 + /*
619 + * Clear bits 4:0 in low register to lock.
620 + * Clear bits 8,4:0 in high register to lock.
621 + *
622 + * The rest of the lower 10 bits in both registers are
623 + * unused so we might as well clear them all.
624 + */
625 + if (rar_index == RAR_TYPE_VIDEO) {
626 + low = my_rar_device.rar_addr[rar_index].low &
627 + 0xfffffc00u;
628 + high = my_rar_device.rar_addr[rar_index].high &
629 + 0xfffffc00u;
630 + low |= 0x00000009;
631 + high |= 0x00000015;
632 + }
633 +
634 + else if (rar_index == RAR_TYPE_AUDIO) {
635 + low = my_rar_device.rar_addr[rar_index].low &
636 + 0xfffffc00u;
637 + high = my_rar_device.rar_addr[rar_index].high &
638 + 0xfffffc00u;
639 + low |= 0x00000008;
640 + high |= 0x00000018;
641 + }
642 +
643 + else {
644 + low = my_rar_device.rar_addr[rar_index].low &
645 + 0xfffffc00u;
646 + high = my_rar_device.rar_addr[rar_index].high &
647 + 0xfffffc00u;
648 + high |= 0x00000018;
649 + }
650 +
651 + /*
652 + * Now program the register using the Lincroft message
653 + * bus interface.
654 + */
655 + result = memrar_set_rar_addr(my_rar_device.rar_dev,
656 + my_rar_device.rar_offsets[rar_index].low,
657 + low);
658 +
659 + if (result == 0)
660 + result = memrar_set_rar_addr(
661 + my_rar_device.rar_dev,
662 + my_rar_device.rar_offsets[rar_index].high,
663 + high);
664 + }
665 +
666 + spin_unlock_irqrestore(&rar_spinlock_lock, my_rar_device.rar_flags);
667 +
668 +exit_rar_lock:
669 +
670 + return result;
671 +}
672 +EXPORT_SYMBOL(rar_lock);
673 +
674 +/* The register_rar function is used by other device drivers
675 + * to ensure that this driver is ready. As we cannot be sure of
676 + * the compile/execute order of drivers in the kernel, it is
677 + * best to give this driver a callback function to call when
678 + * it is ready to give out addresses. The callback function
679 + * would have those steps that continue the initialization of
680 + * a driver that require a valid RAR address. One of those
681 + * steps would be to call rar_get_address().
682 + * This function returns 0 on success and a nonzero error code on failure.
683 + */
684 +int register_rar(int (*callback)(void *yourparameter), void *yourparameter)
685 +{
686 +
687 + int result;
688 +
689 + result = 0;
690 +
691 + if (driver_registered) {
692 +
693 + /* if the driver is already registered, then we can simply
694 + call the callback right now */
695 +
696 + result = (*callback)(yourparameter);
697 + if (result) {
698 + dev_dbg(&my_rar_device.rar_dev->dev,
699 + "Immediate Callback failed: %x\n",
700 + result);
701 + } else {
702 + dev_dbg(&my_rar_device.rar_dev->dev,
703 + "Immediate Callback ran okay\n");
704 + }
705 +
706 + return result;
707 + }
708 +
709 + else if (num_clients >= MRST_NUM_RAR) {
710 + return -ENODEV;
711 + }
712 +
713 + else {
714 +
715 + clients[num_clients].client_callback = callback;
716 + clients[num_clients].customer_data = yourparameter;
717 + clients[num_clients].client_called = 0;
718 + num_clients += 1;
719 + dev_dbg(&my_rar_device.rar_dev->dev, "Callback registered\n");
720 + }
721 +
722 + return result;
723 +
724 +}
725 +EXPORT_SYMBOL(register_rar);
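+
+/*
+ * Illustrative usage sketch: how a hypothetical client driver might use
+ * this interface. The names my_rar_ready() and my_private_data are
+ * placeholders for this example only. The callback runs immediately if
+ * this driver has already probed, or later from rar_probe() otherwise.
+ *
+ * static int my_rar_ready(void *data)
+ * {
+ *         u32 start, end;
+ *
+ *         if (rar_get_address(RAR_TYPE_AUDIO, &start, &end) != 0)
+ *                 return -ENODEV;
+ *
+ *         // continue initialization that needs [start, end]
+ *         return rar_lock(RAR_TYPE_AUDIO);
+ * }
+ *
+ * // in the client driver's init path:
+ * // register_rar(my_rar_ready, my_private_data);
+ */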
726 +
727 +/*
728 + Local Variables:
729 + c-file-style: "linux"
730 + End:
731 +*/
732 Index: linux-2.6.33/include/linux/rar/memrar.h
733 ===================================================================
734 --- /dev/null
735 +++ linux-2.6.33/include/linux/rar/memrar.h
736 @@ -0,0 +1,172 @@
737 +/*
738 + * RAR Handler (/dev/memrar) internal driver API.
739 + * Copyright (C) 2009 Intel Corporation. All rights reserved.
740 + *
741 + * This program is free software; you can redistribute it and/or
742 + * modify it under the terms of version 2 of the GNU General
743 + * Public License as published by the Free Software Foundation.
744 + *
745 + * This program is distributed in the hope that it will be
746 + * useful, but WITHOUT ANY WARRANTY; without even the implied
747 + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
748 + * PURPOSE. See the GNU General Public License for more details.
749 + * You should have received a copy of the GNU General Public
750 + * License along with this program; if not, write to the Free
751 + * Software Foundation, Inc., 59 Temple Place - Suite 330,
752 + * Boston, MA 02111-1307, USA.
753 + * The full GNU General Public License is included in this
754 + * distribution in the file called COPYING.
755 + */
756 +
757 +
758 +#ifndef _MEMRAR_H
759 +#define _MEMRAR_H
760 +
761 +#include <linux/ioctl.h>
762 +#include <linux/types.h>
763 +
764 +
765 +/*
766 + * Constants that specify different kinds of RAR regions that could be
767 + * set up.
768 + */
769 +static __u32 const RAR_TYPE_VIDEO; /* 0 */
770 +static __u32 const RAR_TYPE_AUDIO = 1;
771 +static __u32 const RAR_TYPE_IMAGE = 2;
772 +static __u32 const RAR_TYPE_DATA = 3;
773 +
774 +/*
775 + * @struct RAR_stat
776 + *
777 + * @brief This structure is used for @c RAR_HANDLER_STAT ioctl and for
778 + * @c RAR_get_stat() user space wrapper function.
779 + */
780 +struct RAR_stat {
781 + /* Type of RAR memory (e.g., audio vs. video) */
782 + __u32 type;
783 +
784 + /*
785 + * Total size of RAR memory region.
786 + */
787 + __u32 capacity;
788 +
789 + /* Size of the largest reservable block. */
790 + __u32 largest_block_size;
791 +};
792 +
793 +
794 +/*
795 + * @struct RAR_block_info
796 + *
797 + * @brief The argument for the @c RAR_HANDLER_RESERVE @c ioctl.
798 + *
799 + */
800 +struct RAR_block_info {
801 + /* Type of RAR memory (e.g., audio vs. video) */
802 + __u32 type;
803 +
804 + /* Requested size of a block to be reserved in RAR. */
805 + __u32 size;
806 +
807 + /* Handle that can be used to refer to reserved block. */
808 + __u32 handle;
809 +};
810 +
811 +/*
812 + * @struct RAR_buffer
813 + *
814 + * Structure that contains all information related to a given block of
815 + * memory in RAR. It is generally only used when retrieving bus
816 + * addresses.
817 + *
818 + * @note This structure is used only by RAR-enabled drivers, and is
819 + * not intended to be exposed to the user space.
820 + */
821 +struct RAR_buffer {
822 + /* Structure containing base RAR buffer information */
823 + struct RAR_block_info info;
824 +
825 + /* Buffer bus address */
826 + __u32 bus_address;
827 +};
828 +
829 +
830 +#define RAR_IOCTL_BASE 0xE0
831 +
832 +/* Reserve RAR block. */
833 +#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
834 +
835 +/* Release previously reserved RAR block. */
836 +#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
837 +
838 +/* Get RAR stats. */
839 +#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
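+
+/*
+ * Illustrative user space sketch (error handling kept minimal): how the
+ * /dev/memrar ioctl interface defined above might be used to reserve
+ * and release an audio RAR block. The 64 KiB size is an arbitrary
+ * example value.
+ *
+ * #include <fcntl.h>
+ * #include <sys/ioctl.h>
+ *
+ * struct RAR_block_info info = {
+ *         .type = RAR_TYPE_AUDIO,
+ *         .size = 64 * 1024,
+ * };
+ * int fd = open("/dev/memrar", O_RDWR);
+ *
+ * if (fd >= 0 && ioctl(fd, RAR_HANDLER_RESERVE, &info) == 0) {
+ *         // info.handle now refers to the reserved block
+ *         ioctl(fd, RAR_HANDLER_RELEASE, &info.handle);
+ * }
+ */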
840 +
841 +
842 +/* -------------------------------------------------------------- */
843 +/* Kernel Side RAR Handler Interface */
844 +/* -------------------------------------------------------------- */
845 +
846 +/*
847 + * @function rar_reserve
848 + *
849 + * @brief Reserve RAR buffers.
850 + *
851 + * This function will reserve buffers in the restricted access regions
852 + * of given types.
853 + *
854 + * @return Number of successfully reserved buffers.
855 + * Successful buffer reservations will have the corresponding
856 + * @c bus_address field set to a non-zero value in the
857 + * given @a buffers vector.
858 + */
859 +extern size_t rar_reserve(struct RAR_buffer *buffers,
860 + size_t count);
861 +
862 +/*
863 + * @function rar_release
864 + *
865 + * @brief Release RAR buffers retrieved through call to
866 + * @c rar_reserve() or @c rar_handle_to_bus().
867 + *
868 + * This function will release RAR buffers that were retrieved through
869 + * a call to @c rar_reserve() or @c rar_handle_to_bus() by
870 + * decrementing the reference count. The RAR buffer will be reclaimed
871 + * when the reference count drops to zero.
872 + *
873 + * @return Number of successfully released buffers.
874 + * Successful releases will have their handle field set to
875 + * zero in the given @a buffers vector.
876 + */
877 +extern size_t rar_release(struct RAR_buffer *buffers,
878 + size_t count);
879 +
880 +/*
881 + * @function rar_handle_to_bus
882 + *
883 + * @brief Convert a vector of RAR handles to bus addresses.
884 + *
885 + * This function will retrieve the RAR buffer bus addresses, type and
886 + * size corresponding to the RAR handles provided in the @a buffers
887 + * vector.
888 + *
889 + * @return Number of successfully converted buffers.
890 + * The bus address will be set to @c 0 for unrecognized
891 + * handles.
892 + *
893 + * @note The reference count for each corresponding buffer in RAR will
894 + * be incremented. Call @c rar_release() when done with the
895 + * buffers.
896 + */
897 +extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
898 + size_t count);
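+
+/*
+ * Illustrative kernel side sketch: a RAR-enabled driver reserving a
+ * single video RAR buffer and releasing it when done. PAGE_SIZE is an
+ * arbitrary example size.
+ *
+ * struct RAR_buffer buf = {
+ *         .info = {
+ *                 .type = RAR_TYPE_VIDEO,
+ *                 .size = PAGE_SIZE,
+ *         },
+ * };
+ *
+ * if (rar_reserve(&buf, 1) == 1) {
+ *         // buf.bus_address may now be handed to authorized hardware
+ *         rar_release(&buf, 1);
+ * }
+ */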
899 +
900 +
901 +#endif /* _MEMRAR_H */
902 +
903 +
904 +/*
905 + Local Variables:
906 + c-file-style: "linux"
907 + End:
908 +*/
909 Index: linux-2.6.33/include/linux/rar/rar_register.h
910 ===================================================================
911 --- /dev/null
912 +++ linux-2.6.33/include/linux/rar/rar_register.h
913 @@ -0,0 +1,79 @@
914 +/*
915 + * Copyright (C) 2008, 2009 Intel Corporation. All rights reserved.
916 + *
917 + * This program is free software; you can redistribute it and/or
918 + * modify it under the terms of version 2 of the GNU General
919 + * Public License as published by the Free Software Foundation.
920 + *
921 + * This program is distributed in the hope that it will be
922 + * useful, but WITHOUT ANY WARRANTY; without even the implied
923 + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
924 + * PURPOSE. See the GNU General Public License for more details.
925 + * You should have received a copy of the GNU General Public
926 + * License along with this program; if not, write to the Free
927 + * Software Foundation, Inc., 59 Temple Place - Suite 330,
928 + * Boston, MA 02111-1307, USA.
929 + * The full GNU General Public License is included in this
930 + * distribution in the file called COPYING.
931 + */
932 +
933 +
934 +#ifndef _RAR_REGISTER_H
935 +#define _RAR_REGISTER_H
936 +
937 +# include <linux/types.h>
938 +
939 +/* The register_rar function is used by other device drivers
940 + * to ensure that this driver is ready. As we cannot be sure of
941 + * the compile/execute order of drivers in the kernel, it is
942 + * best to give this driver a callback function to call when
943 + * it is ready to give out addresses. The callback function
944 + * would have those steps that continue the initialization of
945 + * a driver that require a valid RAR address. One of those
946 + * steps would be to call rar_get_address().
947 + * This function returns 0 on success and a nonzero error code on failure.
948 + */
949 +int register_rar(int (*callback)(void *yourparameter), void *yourparameter);
950 +
951 +/* The rar_get_address function is used by other device drivers
952 + * to obtain RAR address information on a RAR. It takes three
953 + * parameters:
954 + *
955 + * int rar_index
956 + * The rar_index is an index to the RAR for which you wish to retrieve
957 + * the address information.
958 + * Values can be 0, 1, or 2.
959 + *
960 + * u32 *start_address and u32 *end_address are pointers through which
961 + * the function returns the start and end bus addresses of that RAR.
962 + *
963 + * The function returns 0 upon success or a negative error code
964 + * (e.g. -ENODEV) if there is no RAR facility on this system.
965 + */
966 +int rar_get_address(int rar_index,
967 + u32 *start_address,
968 + u32 *end_address);
969 +
970 +
971 +/* The rar_lock function is used by other device drivers to lock a RAR.
972 + * Once a RAR is locked, it stays locked until the next system reboot.
973 + * The function takes one parameter:
974 + *
975 + * int rar_index
976 + * The rar_index is an index to the rar that you want to lock.
977 + * Values can be 0,1, or 2.
978 + *
979 + * The function returns 0 upon success or a negative error code
980 + * (e.g. -ENODEV) if there is no RAR facility on this system.
981 + */
982 +int rar_lock(int rar_index);
983 +
984 +
985 +#endif /* _RAR_REGISTER_H */
986 +
987 +
988 +/*
989 + Local Variables:
990 + c-file-style: "linux"
991 + End:
992 +*/
993 Index: linux-2.6.33/drivers/misc/Kconfig
994 ===================================================================
995 --- linux-2.6.33.orig/drivers/misc/Kconfig
996 +++ linux-2.6.33/drivers/misc/Kconfig
997 @@ -249,6 +249,17 @@ config SGI_GRU_DEBUG
998 This option enables addition debugging code for the SGI GRU driver. If
999 you are unsure, say N.
1000
1001 +config MRST_RAR_HANDLER
1002 + tristate "RAR handler driver for Intel Moorestown platform"
1003 + depends on X86
1004 + select RAR_REGISTER
1005 + ---help---
1006 + This driver provides a memory management interface to
1007 + restricted access regions available in the Intel Moorestown
1008 + platform.
1009 +
1010 + If unsure, say N.
1011 +
1012 config MRST_VIB
1013 tristate "vibrator driver for Intel Moorestown platform"
1014 help
1015 Index: linux-2.6.33/drivers/misc/Makefile
1016 ===================================================================
1017 --- linux-2.6.33.orig/drivers/misc/Makefile
1018 +++ linux-2.6.33/drivers/misc/Makefile
1019 @@ -22,6 +22,8 @@ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfg
1020 obj-$(CONFIG_HP_ILO) += hpilo.o
1021 obj-$(CONFIG_MRST) += intel_mrst.o
1022 obj-$(CONFIG_ISL29003) += isl29003.o
1023 +obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
1024 +memrar-y := memrar_allocator.o memrar_handler.o
1025 obj-$(CONFIG_MRST_VIB) += mrst_vib.o
1026 obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
1027 obj-$(CONFIG_DS1682) += ds1682.o
1028 Index: linux-2.6.33/drivers/misc/memrar_allocator.c
1029 ===================================================================
1030 --- /dev/null
1031 +++ linux-2.6.33/drivers/misc/memrar_allocator.c
1032 @@ -0,0 +1,374 @@
1033 +/*
1034 + * memrar_allocator 0.2: An allocator for Intel RAR.
1035 + *
1036 + * Copyright (C) 2009 Intel Corporation. All rights reserved.
1037 + *
1038 + * This program is free software; you can redistribute it and/or
1039 + * modify it under the terms of version 2 of the GNU General
1040 + * Public License as published by the Free Software Foundation.
1041 + *
1042 + * This program is distributed in the hope that it will be
1043 + * useful, but WITHOUT ANY WARRANTY; without even the implied
1044 + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
1045 + * PURPOSE. See the GNU General Public License for more details.
1046 + * You should have received a copy of the GNU General Public
1047 + * License along with this program; if not, write to the Free
1048 + * Software Foundation, Inc., 59 Temple Place - Suite 330,
1049 + * Boston, MA 02111-1307, USA.
1050 + * The full GNU General Public License is included in this
1051 + * distribution in the file called COPYING.
1052 + *
1053 + *
1054 + * ------------------------------------------------------------------
1055 + *
1056 + * This simple allocator implementation provides a
1057 + * malloc()/free()-like interface for reserving space within a
1058 + * previously reserved block of memory. It is not specific to
1059 + * any hardware, nor is it coupled with the lower level paging
1060 + * mechanism.
1061 + *
1062 + * The primary goal of this implementation is to provide a means
1063 + * to partition an arbitrary block of memory without actually
1064 + * accessing the memory or incurring any hardware side-effects
1065 + * (e.g. paging). It is, in effect, a bookkeeping mechanism for
1066 + * buffers.
1067 + */
1068 +
1069 +
1070 +#include "memrar_allocator.h"
1071 +#include <linux/slab.h>
1072 +#include <linux/bug.h>
1073 +#include <linux/kernel.h>
1074 +
1075 +
1076 +struct memrar_allocator *memrar_create_allocator(unsigned long base,
1077 + size_t capacity,
1078 + size_t block_size)
1079 +{
1080 + struct memrar_allocator *allocator = NULL;
1081 + struct memrar_free_list *first_node = NULL;
1082 +
1083 + /*
1084 + * Make sure the base address is aligned on a block_size
1085 + * boundary.
1086 + *
1087 + * @todo Is this necessary?
1088 + */
1089 + /* base = ALIGN(base, block_size); */
1090 +
1091 + /* Validate parameters.
1092 + *
1093 + * Make sure we can allocate the entire memory allocator
1094 + * space. Zero capacity or block size are obviously invalid.
1095 + */
1096 + if (base == 0
1097 + || capacity == 0
1098 + || block_size == 0
1099 + || ULONG_MAX - capacity < base
1100 + || capacity < block_size)
1101 + return allocator;
1102 +
1103 + /*
1104 + * There isn't much point in creating a memory allocator that
1105 + * is only capable of holding one block but we'll allow it,
1106 + * and issue a diagnostic.
1107 + */
1108 + WARN(capacity < block_size * 2,
1109 + "memrar: Only one block available to allocator.\n");
1110 +
1111 + allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
1112 +
1113 + if (allocator == NULL)
1114 + return allocator;
1115 +
1116 + mutex_init(&allocator->lock);
1117 + allocator->base = base;
1118 +
1119 + /* Round the capacity down to a multiple of block_size. */
1120 + allocator->capacity = (capacity / block_size) * block_size;
1121 +
1122 + allocator->block_size = block_size;
1123 +
1124 + allocator->largest_free_area = allocator->capacity;
1125 +
1126 + /* Initialize the handle and free lists. */
1127 + INIT_LIST_HEAD(&allocator->handle_list.list);
1128 + INIT_LIST_HEAD(&allocator->free_list.list);
1129 +
1130 + first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
1131 + if (first_node == NULL) {
1132 + kfree(allocator);
1133 + allocator = NULL;
1134 + } else {
1135 + /* Full range of blocks is available. */
1136 + first_node->begin = base;
1137 + first_node->end = base + allocator->capacity;
1138 + list_add(&first_node->list,
1139 + &allocator->free_list.list);
1140 + }
1141 +
1142 + return allocator;
1143 +}
1144 +
1145 +void memrar_destroy_allocator(struct memrar_allocator *allocator)
1146 +{
1147 + /*
1148 + * Assume that the memory allocator lock isn't held at this
1149 + * point in time. Caller must ensure that.
1150 + */
1151 +
1152 + struct memrar_free_list *pos;
1153 + struct memrar_free_list *n;
1154 +
1155 + if (allocator == NULL)
1156 + return;
1157 +
1158 + mutex_lock(&allocator->lock);
1159 +
1160 + /* Reclaim free list resources. */
1161 + list_for_each_entry_safe(pos,
1162 + n,
1163 + &allocator->free_list.list,
1164 + list) {
1165 + list_del(&pos->list);
1166 + kfree(pos);
1167 + }
1168 +
1169 + mutex_unlock(&allocator->lock);
1170 +
1171 + kfree(allocator);
1172 +}
1173 +
1174 +unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
1175 + size_t size)
1176 +{
1177 + struct memrar_free_list *pos = NULL;
1178 +
1179 + size_t num_blocks;
1180 + unsigned long reserved_bytes;
1181 +
1182 + /*
1183 + * Address of allocated buffer. We assume that zero is not a
1184 + * valid address.
1185 + */
1186 + unsigned long addr = 0;
1187 +
1188 + if (allocator == NULL || size == 0)
1189 + return addr;
1190 +
1191 + /* Reserve enough blocks to hold the amount of bytes requested. */
1192 + num_blocks = DIV_ROUND_UP(size, allocator->block_size);
1193 +
1194 + reserved_bytes = num_blocks * allocator->block_size;
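+
+ /*
+ * Example (illustrative): with a 4096 byte block size, a request
+ * for 5000 bytes rounds up to two blocks, i.e. 8192 reserved
+ * bytes.
+ */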
1195 +
1196 + mutex_lock(&allocator->lock);
1197 +
1198 + if (reserved_bytes > allocator->largest_free_area) {
1199 + mutex_unlock(&allocator->lock);
1200 + return addr;
1201 + }
1202 +
1203 + /*
1204 + * Iterate through the free list to find a suitably sized
1205 + * range of free contiguous memory blocks.
1206 + */
1207 + list_for_each_entry(pos, &allocator->free_list.list, list) {
1208 + size_t const curr_size = pos->end - pos->begin;
1209 +
1210 + if (curr_size >= reserved_bytes) {
1211 + struct memrar_handle *handle = NULL;
1212 + struct memrar_handle_list * const new_node =
1213 + kmalloc(sizeof(*new_node), GFP_KERNEL);
1214 +
1215 + if (new_node == NULL)
1216 + break;
1217 +
1218 + list_add(&new_node->list,
1219 + &allocator->handle_list.list);
1220 +
1221 + handle = &new_node->handle;
1222 + handle->end = pos->end;
1223 + pos->end -= reserved_bytes;
1224 + handle->begin = pos->end;
1225 + addr = handle->begin;
1226 +
1227 + if (curr_size == allocator->largest_free_area)
1228 + allocator->largest_free_area -=
1229 + reserved_bytes;
1230 +
1231 + break;
1232 + }
1233 + }
1234 +
1235 + mutex_unlock(&allocator->lock);
1236 +
1237 + return addr;
1238 +}
1239 +
1240 +long memrar_allocator_free(struct memrar_allocator *allocator,
1241 + unsigned long addr)
1242 +{
1243 + struct list_head *pos = NULL;
1244 + struct list_head *tmp = NULL;
1245 + struct memrar_handle_list *handles = NULL;
1246 + struct memrar_handle *handle = NULL;
1247 + struct memrar_free_list *new_node = NULL;
1248 + int result = -ENOMEM;
1249 +
1250 + if (allocator == NULL)
1251 + return -EINVAL;
1252 +
1253 + if (addr == 0)
1254 + return 0; /* Ignore free(0). */
1255 +
1256 + mutex_lock(&allocator->lock);
1257 +
1258 + /* Find the corresponding handle. */
1259 + list_for_each_entry(handles,
1260 + &allocator->handle_list.list,
1261 + list) {
1262 + if (handles->handle.begin == addr) {
1263 + handle = &handles->handle;
1264 + break;
1265 + }
1266 + }
1267 +
1268 + /* No such buffer created by this allocator. */
1269 + if (handle == NULL) {
1270 + mutex_unlock(&allocator->lock);
1271 + return -EFAULT;
1272 + }
1273 +
1274 + /*
1275 + * Coalesce adjacent chunks of memory if possible.
1276 + *
1277 + * @note This isn't full blown coalescing since we're only
1278 + * coalescing at most three chunks of memory.
1279 + */
1280 + list_for_each_safe(pos, tmp, &allocator->free_list.list) {
1281 + /* @todo O(n) performance. Optimize. */
1282 +
1283 + struct memrar_free_list * const chunk =
1284 + list_entry(pos,
1285 + struct memrar_free_list,
1286 + list);
1287 +
1288 + struct memrar_free_list * const next =
1289 + list_entry(pos->next,
1290 + struct memrar_free_list,
1291 + list);
1292 +
1293 + /* Extend size of existing free adjacent chunk. */
1294 + if (chunk->end == handle->begin) {
1295 + /*
1296 + * Chunk "less than" the one we're
1297 + * freeing is adjacent.
1298 + */
1299 +
1300 + unsigned long new_chunk_size;
1301 +
1302 + chunk->end = handle->end;
1303 +
1304 + /*
1305 + * Now check if next free chunk is adjacent to
1306 + * the current extended free chunk.
1307 + */
1308 + if (pos != pos->next
1309 + && chunk->end == next->begin) {
1310 + chunk->end = next->end;
1311 + list_del(pos->next);
1312 + kfree(next);
1313 + }
1314 +
1315 + new_chunk_size = chunk->end - chunk->begin;
1316 +
1317 + if (new_chunk_size > allocator->largest_free_area)
1318 + allocator->largest_free_area =
1319 + new_chunk_size;
1320 +
1321 + result = 0;
1322 + goto exit_memrar_free;
1323 + } else if (chunk->begin == handle->end) {
1324 + /*
1325 + * Chunk "greater than" the one we're
1326 + * freeing is adjacent.
1327 + */
1328 +
1329 + unsigned long new_chunk_size;
1330 +
1331 + chunk->begin = handle->begin;
1332 +
1333 + /*
1334 + * Now check if next free chunk is adjacent to
1335 + * the current extended free chunk.
1336 + */
1337 + if (pos != pos->next
1338 + && chunk->begin == next->end) {
1339 + chunk->begin = next->begin;
1340 + list_del(pos->next);
1341 + kfree(next);
1342 + }
1343 +
1344 + new_chunk_size = chunk->end - chunk->begin;
1345 +
1346 + if (new_chunk_size > allocator->largest_free_area)
1347 + allocator->largest_free_area =
1348 + new_chunk_size;
1349 +
1350 + result = 0;
1351 + goto exit_memrar_free;
1352 + }
1353 + }
1354 +
1355 + /*
1356 + * Memory being freed is not adjacent to existing free areas
1357 + * of memory in the allocator. Add a new item to the free list.
1358 + *
1359 + * @todo Allocate this free_list node when the buffer itself
1360 + * is allocated to avoid a potential problem where a new
1361 + * node cannot be allocated due to lack of available
1362 + * kernel memory. We can then free this node in the
1363 + * above coalescing code node if it isn't needed.
1364 + *
1365 + * @todo While making this change would address potential
1366 + * memory allocation failure, it would also
1367 + * unfortunately reduce performance of buffer allocation
1368 + * provided by this allocator.
1369 + */
1370 + new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
1371 + if (new_node != NULL) {
1372 + unsigned long new_chunk_size;
1373 +
1374 + new_node->begin = handle->begin;
1375 + new_node->end = handle->end;
1376 + list_add(&new_node->list,
1377 + &allocator->free_list.list);
1378 +
1379 + new_chunk_size = handle->end - handle->begin;
1380 +
1381 + if (new_chunk_size > allocator->largest_free_area)
1382 + allocator->largest_free_area =
1383 + new_chunk_size;
1384 +
1385 + result = 0;
1386 + }
1387 +
1388 +exit_memrar_free:
1389 +
1390 + if (result == 0)
1391 + list_del(&handles->list);
1392 +
1393 + mutex_unlock(&allocator->lock);
1394 +
1395 + kfree(handles);
1396 +
1397 + return result;
1398 +}
1399 +
1400 +
1401 +
1402 +/*
1403 + Local Variables:
1404 + c-file-style: "linux"
1405 + End:
1406 +*/
1407 Index: linux-2.6.33/drivers/misc/memrar_allocator.h
1408 ===================================================================
1409 --- /dev/null
1410 +++ linux-2.6.33/drivers/misc/memrar_allocator.h
1411 @@ -0,0 +1,165 @@
1412 +/*
1413 + * Copyright (C) 2009 Intel Corporation. All rights reserved.
1414 + *
1415 + * This program is free software; you can redistribute it and/or
1416 + * modify it under the terms of version 2 of the GNU General
1417 + * Public License as published by the Free Software Foundation.
1418 + *
1419 + * This program is distributed in the hope that it will be
1420 + * useful, but WITHOUT ANY WARRANTY; without even the implied
1421 + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
1422 + * PURPOSE. See the GNU General Public License for more details.
1423 + * You should have received a copy of the GNU General Public
1424 + * License along with this program; if not, write to the Free
1425 + * Software Foundation, Inc., 59 Temple Place - Suite 330,
1426 + * Boston, MA 02111-1307, USA.
1427 + * The full GNU General Public License is included in this
1428 + * distribution in the file called COPYING.
1429 + */
1430 +
1431 +#ifndef MEMRAR_ALLOCATOR_H
1432 +#define MEMRAR_ALLOCATOR_H
1433 +
1434 +
1435 +#include <linux/mutex.h>
1436 +#include <linux/list.h>
1437 +#include <linux/types.h>
1438 +#include <linux/kernel.h>
1439 +
1440 +/*
1441 + * @struct memrar_free_list
1442 + *
1443 + * @brief List of available areas of memory.
1444 + */
1445 +struct memrar_free_list {
1446 + /* Linked list of free memory allocator blocks. */
1447 + struct list_head list;
1448 +
1449 + /* Beginning of available address range. */
1450 + unsigned long begin;
1451 +
1452 + /*
1453 + * End of available address range, one past the end,
1454 + * i.e. [begin, end).
1455 + */
1456 + unsigned long end;
1457 +};
1458 +
1459 +struct memrar_allocator;
1460 +
1461 +/* Structure that describes a chunk of memory reserved by the allocator. */
1462 +struct memrar_handle {
1463 + /* Beginning of the reserved address range. */
1464 + unsigned long begin;
1465 +
1466 + /*
1467 + * End of the reserved address range, one past the end,
1468 + * i.e. [begin, end).
1469 + */
1470 + unsigned long end;
1471 +};
1472 +
1473 +/*
1474 + * @struct memrar_handle_list
1475 + *
1476 + * @brief List of handles corresponding to allocated blocks of memory.
1477 + */
1478 +struct memrar_handle_list {
1479 + /* Linked list of handles corresponding to allocated blocks. */
1480 + struct list_head list;
1481 +
1482 + /* Handle for the allocated block of memory. */
1483 + struct memrar_handle handle;
1484 +};
1485 +
1486 +/*
1487 + * @struct memrar_allocator
1488 + *
1489 + * @brief Encapsulation of the memory allocator state.
1490 + *
1491 + * This structure contains all memory allocator state, including the
1492 + * base address, capacity, free list, lock, etc.
1493 + */
1494 +struct memrar_allocator {
1495 + /*
1496 + * Lock used to synchronize access to the memory allocator
1497 + * state.
1498 + */
1499 + struct mutex lock;
1500 +
1501 + /* Base (start) address of the memory allocator. */
1502 + unsigned long base;
1503 +
1504 + /* Size of the memory allocator in bytes. */
1505 + size_t capacity;
1506 +
1507 + /*
1508 + * The size in bytes of individual blocks within the memory
1509 + * allocator.
1510 + */
1511 + size_t block_size;
1512 +
1513 + /* Largest free area of memory in the allocator in bytes. */
1514 + size_t largest_free_area;
1515 +
1516 + /* List of handles for allocated blocks of memory. */
1517 + struct memrar_handle_list handle_list;
1518 +
1519 + /* List of free address ranges. */
1520 + struct memrar_free_list free_list;
1521 +};
1522 +
1523 +/*
1524 + * @function memrar_create_allocator
1525 + *
1526 + * @brief Create a memory allocator.
1527 + *
1528 + * Create a memory allocator with the given capacity and block size.
1529 + * The capacity will be reduced to be a multiple of the block size, if
1530 + * necessary.
1531 + *
1532 + * @param base Address at which the memory allocator begins.
1533 + * @param capacity Desired size of the memory allocator. This value
1534 + * must be larger than the block_size, ideally more
1535 + * than twice as large since there wouldn't be much
1536 + * point in using a memory allocator otherwise.
1537 + * @param block_size The size of individual blocks within the memory
1538 + * allocator. This value must be smaller than the
1539 + * capacity.
1540 + * @return An instance of the memory allocator, if creation succeeds.
1541 + * @return Zero if creation fails. Failure may occur if not enough
1542 + * kernel memory exists to create the memrar_allocator
1543 + * instance itself, or if the capacity and block_size
1544 + * arguments are not compatible or do not make sense.
1545 + */
1546 +struct memrar_allocator *memrar_create_allocator(unsigned long base,
1547 + size_t capacity,
1548 + size_t block_size);
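+
+/*
+ * Illustrative usage sketch: a typical allocator life cycle. The base
+ * address and sizes below are arbitrary example values.
+ *
+ * struct memrar_allocator *a =
+ *         memrar_create_allocator(0x40000000, 4 * 1024 * 1024, 4096);
+ *
+ * if (a != NULL) {
+ *         unsigned long chunk = memrar_allocator_alloc(a, 8192);
+ *
+ *         if (chunk != 0)
+ *                 memrar_allocator_free(a, chunk);
+ *
+ *         memrar_destroy_allocator(a);
+ * }
+ */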
1549 +
1550 +/*
1551 + * Reclaim resources held by the memory allocator. The caller must
1552 + * explicitly free all memory reserved by memrar_allocator_alloc()
1553 + * prior to calling this function. Otherwise leaks will occur.
1554 + */
1555 +void memrar_destroy_allocator(struct memrar_allocator *allocator);
1556 +
1557 +/*
1558 + * Reserve chunk of memory of given size in the memory allocator.
1559 + */
1560 +unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
1561 + size_t size);
1562 +
1563 +/*
1564 + * Release a chunk of memory previously reserved in the memory allocator.
1565 + */
1566 +long memrar_allocator_free(struct memrar_allocator *allocator,
1567 + unsigned long handle);
1568 +
1569 +#endif /* MEMRAR_ALLOCATOR_H */
1570 +
1571 +
1572 +/*
1573 + Local Variables:
1574 + c-file-style: "linux"
1575 + End:
1576 +*/
1577 Index: linux-2.6.33/drivers/misc/memrar_handler.c
1578 ===================================================================
1579 --- /dev/null
1580 +++ linux-2.6.33/drivers/misc/memrar_handler.c
1581 @@ -0,0 +1,929 @@
1582 +/*
1583 + * memrar_handler 1.0: An Intel restricted access region handler device
1584 + *
1585 + * Copyright (C) 2009 Intel Corporation. All rights reserved.
1586 + *
1587 + * This program is free software; you can redistribute it and/or
1588 + * modify it under the terms of version 2 of the GNU General
1589 + * Public License as published by the Free Software Foundation.
1590 + *
1591 + * This program is distributed in the hope that it will be
1592 + * useful, but WITHOUT ANY WARRANTY; without even the implied
1593 + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
1594 + * PURPOSE. See the GNU General Public License for more details.
1595 + * You should have received a copy of the GNU General Public
1596 + * License along with this program; if not, write to the Free
1597 + * Software Foundation, Inc., 59 Temple Place - Suite 330,
1598 + * Boston, MA 02111-1307, USA.
1599 + * The full GNU General Public License is included in this
1600 + * distribution in the file called COPYING.
1601 + *
1602 + * -------------------------------------------------------------------
1603 + *
1604 + * Moorestown restricted access regions (RAR) provide isolated
1605 + * areas of main memory that are only accessible by authorized
1606 + * devices.
1607 + *
1608 + * The Intel Moorestown RAR handler module exposes a kernel space
1609 + * RAR memory management mechanism. It is essentially a
1610 + * RAR-specific allocator.
1611 + *
1612 + * Besides providing RAR buffer management, the RAR handler also
1613 + * behaves in many ways like an OS virtual memory manager. For
1614 + * example, the RAR "handles" created by the RAR handler are
1615 + * analogous to user space virtual addresses.
1616 + *
1617 + * RAR memory itself is never accessed directly by the RAR
1618 + * handler.
1619 + *
1620 + * -------------------------------------------------------------------
1621 + *
1622 + * TODO
1623 + *
1624 + * 1. Split user space interface from core/kernel code, e.g.:
1625 + * memrar_handler.c -> memrar_core.c, memrar_user.c
1626 + *
1627 + * 2. Convert API documentation to Kerneldoc.
1628 + *
1629 + * 3. Move memrar_allocator.* to the kernel 'lib' directory since it
1630 + * is HW neutral.
1631 + * a. Alternatively, use lib/genalloc.c instead.
1632 + * b. A kernel port of Doug Lea's malloc() implementation may
1633 + * also be an option.
1634 + */
1635 +
1636 +#include <linux/miscdevice.h>
1637 +#include <linux/fs.h>
1638 +#include <linux/slab.h>
1639 +#include <linux/kref.h>
1640 +#include <linux/mutex.h>
1641 +#include <linux/kernel.h>
1642 +#include <linux/uaccess.h>
1643 +#include <linux/mm.h>
1644 +#include <linux/ioport.h>
1645 +#include <linux/io.h>
1646 +
1647 +#include <linux/rar/rar_register.h>
1648 +#include <linux/rar/memrar.h>
1649 +
1650 +#include "memrar_allocator.h"
1651 +
1652 +
1653 +#define MEMRAR_VER "1.0"
1654 +
1655 +/*
1656 + * Moorestown supports three restricted access regions.
1657 + *
1658 + * We only care about the first two, video and audio. The third,
1659 + * reserved for Chaabi and the P-unit, will be handled by their
1660 + * respective drivers.
1661 + */
1662 +#define MRST_NUM_RAR 2
1663 +
1664 +/* ---------------- -------------------- ------------------- */
1665 +
1666 +/*
1667 + * List structure that keeps track of all RAR buffers.
1668 + */
1669 +struct memrar_buffer_info {
1670 + /* Linked list of memrar_buffer_info objects. */
1671 + struct list_head list;
1672 +
1673 + /* Core RAR buffer information. */
1674 + struct RAR_buffer buffer;
1675 +
1676 + /* Reference count */
1677 + struct kref refcount;
1678 +
1679 + /*
1680 + * File handle corresponding to process that reserved the
1681 + * block of memory in RAR. This will be zero for buffers
1682 + * allocated by other drivers instead of by a user space
1683 + * process.
1684 + */
1685 + struct file *owner;
1686 +};
1687 +
1688 +/*
1689 + * Structure that describes the characteristics of a given RAR.
1690 + */
1691 +struct memrar_rar_info {
1692 + /* Base bus address of the RAR. */
1693 + unsigned long base;
1694 +
1695 + /* Length of the RAR. */
1696 + unsigned long length;
1697 +
1698 + /* Virtual address of RAR mapped into kernel. */
1699 + void __iomem *iobase;
1700 +
1701 + /*
1702 + * Allocator associated with the RAR.
1703 + *
1704 + * @note The allocator "capacity" may be smaller than the RAR
1705 + * length if the length is not a multiple of the
1706 + * configured allocator block size.
1707 + */
1708 + struct memrar_allocator *allocator;
1709 +
1710 + /*
1711 + * Table that keeps track of all reserved RAR buffers.
1712 + */
1713 + struct memrar_buffer_info buffers;
1714 +
1715 + /*
1716 + * Lock used to synchronize access to RAR-specific data
1717 + * structures.
1718 + */
1719 + struct mutex lock;
1720 +};
1721 +
1722 +/*
1723 + * Array of RAR characteristics.
1724 + */
1725 +static struct memrar_rar_info memrars[MRST_NUM_RAR];
1726 +
1727 +
1728 +/* ---------------- -------------------- ------------------- */
1729 +
1730 +/* Validate RAR type. */
1731 +static inline int memrar_is_valid_rar_type(u32 type)
1732 +{
1733 + return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
1734 +}
1735 +
1736 +/* Check if an address/handle falls within the given RAR memory range. */
1737 +static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
1738 + u32 vaddr)
1739 +{
1740 + unsigned long const iobase = (unsigned long) (rar->iobase);
1741 + return (vaddr >= iobase && vaddr < iobase + rar->length);
1742 +}
1743 +
1744 +/* Retrieve RAR information associated with the given handle. */
1745 +static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
1746 +{
1747 + int i;
1748 + for (i = 0; i < MRST_NUM_RAR; ++i) {
1749 + struct memrar_rar_info * const rar = &memrars[i];
1750 + if (memrar_handle_in_range(rar, vaddr))
1751 + return rar;
1752 + }
1753 +
1754 + return NULL;
1755 +}
1756 +
1757 +/*
1758 + * Retrieve bus address from given handle.
1759 + *
1760 + * @return Address corresponding to given handle. Zero if handle
1761 + * is invalid.
1762 + */
1763 +static unsigned long memrar_get_bus_address(
1764 + struct memrar_rar_info *rar,
1765 + u32 vaddr)
1766 +{
1767 + unsigned long const iobase = (unsigned long) (rar->iobase);
1768 +
1769 + if (!memrar_handle_in_range(rar, vaddr))
1770 + return 0;
1771 +
1772 + /*
1773 + * An assumption is made that the virtual address offset is
1774 + * the same as the bus address offset, at least based on the
1775 + * way this driver is implemented. For example, vaddr + 2 ==
1776 + * baddr + 2.
1777 + *
1778 + * @todo Is that a valid assumption?
1779 + */
1780 + return rar->base + (vaddr - iobase);
1781 +}
1782 +
1783 +/*
1784 + * Retrieve physical address from given handle.
1785 + *
1786 + * @return Address corresponding to given handle. Zero if handle
1787 + * is invalid.
1788 + */
1789 +static unsigned long memrar_get_physical_address(
1790 + struct memrar_rar_info *rar,
1791 + u32 vaddr)
1792 +{
1793 + /*
1794 + * @todo This assumes that the bus address and physical
1795 + * address are the same. That is true for Moorestown
1796 + * but not necessarily on other platforms. This
1797 + * deficiency should be addressed at some point.
1798 + */
1799 + return memrar_get_bus_address(rar, vaddr);
1800 +}
1801 +
1802 +/*
1803 + * Core block release code.
1804 + *
1805 + * @note This code removes the node from a list. Make sure any list
1806 + * iteration is performed using list_for_each_safe().
1807 + */
1808 +static void memrar_release_block_i(struct kref *ref)
1809 +{
1810 + /*
1811 + * Last reference is being released. Remove from the table,
1812 + * and reclaim resources.
1813 + */
1814 +
1815 + struct memrar_buffer_info * const node =
1816 + container_of(ref, struct memrar_buffer_info, refcount);
1817 +
1818 + struct RAR_block_info * const user_info =
1819 + &node->buffer.info;
1820 +
1821 + struct memrar_allocator * const allocator =
1822 + memrars[user_info->type].allocator;
1823 +
1824 + list_del(&node->list);
1825 +
1826 + memrar_allocator_free(allocator, user_info->handle);
1827 +
1828 + kfree(node);
1829 +}
1830 +
1831 +/*
1832 + * Initialize RAR parameters, such as bus addresses, etc.
1833 + */
1834 +static int memrar_init_rar_resources(char const *devname)
1835 +{
1836 + /* ---- Sanity Checks ----
1837 + * 1. RAR bus addresses in both Lincroft and Langwell RAR
1838 + * registers should be the same.
1839 + * 2. Secure device ID in Langwell RAR registers should be set
1840 + * appropriately, i.e. only LPE DMA for the audio RAR, and
1841 + * security for the other Langwell based RAR register. The
1842 + * video RAR is not accessed from the Langwell side,
1843 + * meaning its corresponding Langwell RAR should only be
1844 + * accessible by the security engine.
1845 + * 3. Audio and video RAR register and RAR access should be
1846 + * locked. If not, lock them. Except for debugging
1847 + * purposes, there is no reason for them to be unlocked.
1848 + *
1849 + * @todo Should the RAR handler driver even be aware of audio
1850 + * and video RAR settings?
1851 + */
1852 +
1853 + /*
1854 + * RAR buffer block size.
1855 + *
1856 +	 * We choose it to be the size of a page to simplify the
1857 +	 * /dev/memrar mmap() implementation and usage.  Paging is
1858 +	 * otherwise not involved once a RAR is locked down.
1859 + */
1860 + static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
1861 +
1862 + int z;
1863 + int found_rar = 0;
1864 +
1865 + BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
1866 +
1867 + for (z = 0; z != MRST_NUM_RAR; ++z) {
1868 + u32 low, high;
1869 + struct memrar_rar_info * const rar = &memrars[z];
1870 +
1871 + BUG_ON(!memrar_is_valid_rar_type(z));
1872 +
1873 + mutex_init(&rar->lock);
1874 +
1875 + /*
1876 +		 * Initialize the buffer table before we reach any
1877 +		 * code that exits on failure, since the finalization
1878 +		 * code requires an initialized list.
1879 + */
1880 + INIT_LIST_HEAD(&rar->buffers.list);
1881 +
1882 + if (rar_get_address(z, &low, &high) != 0) {
1883 + /* No RAR is available. */
1884 + break;
1885 + } else if (low == 0 || high == 0) {
1886 + /*
1887 + * We don't immediately break out of the loop
1888 + * since the next type of RAR may be enabled.
1889 + */
1890 + rar->base = 0;
1891 + rar->length = 0;
1892 + rar->iobase = NULL;
1893 + rar->allocator = NULL;
1894 + continue;
1895 + }
1896 +
1897 + /*
1898 +		 * @todo Verify that LNC and LNW RAR register contents
1899 +		 *       (addresses, security, etc.) are compatible and
1900 +		 *       consistent.
1901 + */
1902 +
1903 + rar->length = high - low + 1;
1904 +
1905 + /* Claim RAR memory as our own. */
1906 + if (request_mem_region(low, rar->length, devname) == NULL) {
1907 + rar->length = 0;
1908 +
1909 + pr_err("%s: Unable to claim RAR[%d] memory.\n",
1910 + devname,
1911 + z);
1912 + pr_err("%s: RAR[%d] disabled.\n", devname, z);
1913 +
1914 + /*
1915 + * Rather than break out of the loop by
1916 + * returning -EBUSY, for example, we may be
1917 + * able to claim memory of the next RAR region
1918 + * as our own.
1919 + */
1920 + continue;
1921 + }
1922 +
1923 + rar->base = low;
1924 +
1925 + /*
1926 + * Now map it into the kernel address space.
1927 + *
1928 + * Note that the RAR memory may only be accessed by IA
1929 + * when debugging. Otherwise attempts to access the
1930 + * RAR memory when it is locked down will result in
1931 + * behavior similar to writing to /dev/null and
1932 + * reading from /dev/zero. This behavior is enforced
1933 + * by the hardware. Even if we don't access the
1934 + * memory, mapping it into the kernel provides us with
1935 + * a convenient RAR handle to physical address mapping.
1936 + */
1937 + rar->iobase = ioremap_nocache(rar->base, rar->length);
1938 + if (rar->iobase == NULL) {
1939 + pr_err("%s: Unable to map RAR memory.\n",
1940 + devname);
1941 + return -ENOMEM;
1942 + }
1943 +
1944 + /* Initialize corresponding memory allocator. */
1945 + rar->allocator = memrar_create_allocator(
1946 + (unsigned long) rar->iobase,
1947 + rar->length,
1948 + RAR_BLOCK_SIZE);
1949 + if (rar->allocator == NULL)
1950 +			return -ENOMEM;
1951 +
1952 + /*
1953 + * -------------------------------------------------
1954 + * Make sure all RARs handled by us are locked down.
1955 + * -------------------------------------------------
1956 + */
1957 +
1958 + /* Enable RAR protection on the Lincroft side. */
1959 + if (0) {
1960 + /* @todo Enable once LNW A2 is widely available. */
1961 + rar_lock(z);
1962 + } else {
1963 + pr_warning("%s: LNC RAR[%d] no lock sanity check.\n",
1964 + devname,
1965 + z);
1966 + }
1967 +
1968 + /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
1969 + /* |||||||||||||||||||||||||||||||||||||||||||||||||| */
1970 +
1971 + /*
1972 + * Enable RAR protection on the Langwell side.
1973 + *
1974 + * Ideally Langwell side RAR protection should already
1975 + * have been enabled by the OEM in the SMIP header but
1976 + * we perform a sanity check, just in case.
1977 + *
1978 + * @todo Set appropriate "lock"/"valid" bits in LNW
1979 + * {LOW,UP}RAR[12] SCCB registers **and** LNW
1980 + * {LOW,UP}RAR[01] cDMI registers only if a
1981 + * suitable SDID (i.e. for security or LPE DMA)
1982 + * is set.
1983 + */
1984 + pr_warning("%s: LNW RAR[%d] no lock sanity check.\n",
1985 + devname,
1986 + z);
1987 +
1988 +
1989 + pr_info("%s: BRAR[%d]\n"
1990 + "\tlow address: 0x%x\n"
1991 + "\thigh address: 0x%x\n"
1992 + "\tsize : %u KiB\n",
1993 + devname,
1994 + z,
1995 + low,
1996 + high,
1997 + rar->allocator->capacity / 1024);
1998 +
1999 + found_rar = 1;
2000 + }
2001 +
2002 + if (!found_rar) {
2003 + /*
2004 + * No RAR support. Don't bother continuing.
2005 + *
2006 + * Note that this is not a failure.
2007 + */
2008 + pr_info("%s: No Moorestown RAR support available.\n",
2009 + devname);
2010 + return -ENODEV;
2011 + }
2012 +
2013 + return 0;
2014 +}
2015 +
2016 +/*
2017 + * Finalize RAR resources.
2018 + */
2019 +static void memrar_fini_rar_resources(void)
2020 +{
2021 + int z;
2022 + struct memrar_buffer_info *pos;
2023 + struct memrar_buffer_info *tmp;
2024 +
2025 + /*
2026 + * @todo Do we need to hold a lock at this point in time?
2027 + * (module initialization failure or exit?)
2028 + */
2029 +
2030 + for (z = MRST_NUM_RAR; z-- != 0; ) {
2031 + struct memrar_rar_info * const rar = &memrars[z];
2032 +
2033 + /* Clean up remaining resources. */
2034 +
2035 + list_for_each_entry_safe(pos,
2036 + tmp,
2037 + &rar->buffers.list,
2038 + list) {
2039 + kref_put(&pos->refcount, memrar_release_block_i);
2040 + }
2041 +
2042 + memrar_destroy_allocator(rar->allocator);
2043 + rar->allocator = NULL;
2044 +
2045 + iounmap(rar->iobase);
2046 + rar->iobase = NULL;
2047 +
2048 + release_mem_region(rar->base, rar->length);
2049 + rar->base = 0;
2050 +
2051 + rar->length = 0;
2052 + }
2053 +}
2054 +
2055 +static long memrar_reserve_block(struct RAR_buffer *request,
2056 + struct file *filp)
2057 +{
2058 + struct RAR_block_info * const rinfo = &request->info;
2059 + struct RAR_buffer *buffer;
2060 + struct memrar_buffer_info *buffer_info;
2061 + u32 handle;
2062 + struct memrar_rar_info *rar = NULL;
2063 +
2064 + /* Prevent array overflow. */
2065 + if (!memrar_is_valid_rar_type(rinfo->type))
2066 + return -EINVAL;
2067 +
2068 + rar = &memrars[rinfo->type];
2069 +
2070 + /* Reserve memory in RAR. */
2071 + handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
2072 + if (handle == 0)
2073 + return -ENOMEM;
2074 +
2075 + buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
2076 +
2077 + if (buffer_info == NULL) {
2078 + memrar_allocator_free(rar->allocator, handle);
2079 + return -ENOMEM;
2080 + }
2081 +
2082 + buffer = &buffer_info->buffer;
2083 + buffer->info.type = rinfo->type;
2084 + buffer->info.size = rinfo->size;
2085 +
2086 + /* Memory handle corresponding to the bus address. */
2087 + buffer->info.handle = handle;
2088 + buffer->bus_address = memrar_get_bus_address(rar, handle);
2089 +
2090 + /*
2091 + * Keep track of owner so that we can later cleanup if
2092 + * necessary.
2093 + */
2094 + buffer_info->owner = filp;
2095 +
2096 + kref_init(&buffer_info->refcount);
2097 +
2098 + mutex_lock(&rar->lock);
2099 + list_add(&buffer_info->list, &rar->buffers.list);
2100 + mutex_unlock(&rar->lock);
2101 +
2102 + rinfo->handle = buffer->info.handle;
2103 + request->bus_address = buffer->bus_address;
2104 +
2105 + return 0;
2106 +}
2107 +
2108 +static long memrar_release_block(u32 addr)
2109 +{
2110 + struct memrar_buffer_info *pos;
2111 + struct memrar_buffer_info *tmp;
2112 + struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
2113 + long result = -EINVAL;
2114 +
2115 + if (rar == NULL)
2116 + return -EFAULT;
2117 +
2118 + mutex_lock(&rar->lock);
2119 +
2120 + /*
2121 + * Iterate through the buffer list to find the corresponding
2122 + * buffer to be released.
2123 + */
2124 + list_for_each_entry_safe(pos,
2125 + tmp,
2126 + &rar->buffers.list,
2127 + list) {
2128 + if (addr == pos->buffer.info.handle
2129 + && memrar_is_valid_rar_type(pos->buffer.info.type)) {
2130 + kref_put(&pos->refcount, memrar_release_block_i);
2131 + result = 0;
2132 + break;
2133 + }
2134 + }
2135 +
2136 + mutex_unlock(&rar->lock);
2137 +
2138 + return result;
2139 +}
2140 +
2141 +static long memrar_get_stat(struct RAR_stat *r)
2142 +{
2143 + long result = -EINVAL;
2144 +
2145 + if (likely(r != NULL) && memrar_is_valid_rar_type(r->type)) {
2146 + struct memrar_allocator * const allocator =
2147 + memrars[r->type].allocator;
2148 +
2149 + BUG_ON(allocator == NULL);
2150 +
2151 + /*
2152 + * Allocator capacity doesn't change over time. No
2153 + * need to synchronize.
2154 + */
2155 + r->capacity = allocator->capacity;
2156 +
2157 + mutex_lock(&allocator->lock);
2158 +
2159 + r->largest_block_size = allocator->largest_free_area;
2160 +
2161 + mutex_unlock(&allocator->lock);
2162 +
2163 + result = 0;
2164 + }
2165 +
2166 + return result;
2167 +}
2168 +
2169 +static long memrar_ioctl(struct file *filp,
2170 + unsigned int cmd,
2171 + unsigned long arg)
2172 +{
2173 + void __user *argp = (void __user *)arg;
2174 + long result = 0;
2175 +
2176 + struct RAR_buffer buffer;
2177 + struct RAR_block_info * const request = &buffer.info;
2178 + struct RAR_stat rar_info;
2179 + u32 rar_handle;
2180 +
2181 + switch (cmd) {
2182 + case RAR_HANDLER_RESERVE:
2183 + if (copy_from_user(request,
2184 + argp,
2185 + sizeof(*request)))
2186 + return -EFAULT;
2187 +
2188 + result = memrar_reserve_block(&buffer, filp);
2189 + if (result != 0)
2190 + return result;
2191 +
2192 +		return copy_to_user(argp, request, sizeof(*request)) ? -EFAULT : 0;
2193 +
2194 + case RAR_HANDLER_RELEASE:
2195 + if (copy_from_user(&rar_handle,
2196 + argp,
2197 + sizeof(rar_handle)))
2198 + return -EFAULT;
2199 +
2200 + return memrar_release_block(rar_handle);
2201 +
2202 + case RAR_HANDLER_STAT:
2203 + if (copy_from_user(&rar_info,
2204 + argp,
2205 + sizeof(rar_info)))
2206 + return -EFAULT;
2207 +
2208 + /*
2209 + * Populate the RAR_stat structure based on the RAR
2210 +		 * type given by the user.
2211 + */
2212 + if (memrar_get_stat(&rar_info) != 0)
2213 + return -EINVAL;
2214 +
2215 + /*
2216 + * @todo Do we need to verify destination pointer
2217 + * "argp" is non-zero? Is that already done by
2218 + * copy_to_user()?
2219 + */
2220 + return copy_to_user(argp,
2221 + &rar_info,
2222 + sizeof(rar_info)) ? -EFAULT : 0;
2223 +
2224 + default:
2225 + return -ENOTTY;
2226 + }
2227 +
2228 + return 0;
2229 +}
2230 +
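+/*
+ * Illustrative user-space sketch (not part of this patch): how a
+ * client of /dev/memrar might drive the ioctl interface above.  It
+ * assumes the RAR_HANDLER_* ioctl numbers and the RAR_TYPE_* and
+ * struct RAR_block_info definitions come from the memrar user header
+ * that accompanies this driver.
+ *
+ *	int fd = open("/dev/memrar", O_RDWR);
+ *	struct RAR_block_info req = {
+ *		.type = RAR_TYPE_AUDIO,
+ *		.size = 4 * 4096,
+ *	};
+ *
+ *	if (fd >= 0 && ioctl(fd, RAR_HANDLER_RESERVE, &req) == 0) {
+ *		// req.handle now identifies the reserved RAR block.
+ *		ioctl(fd, RAR_HANDLER_RELEASE, &req.handle);
+ *	}
+ *	close(fd);
+ */
+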
2231 +static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
2232 +{
2233 + size_t const size = vma->vm_end - vma->vm_start;
2234 +
2235 + /* Users pass the RAR handle as the mmap() offset parameter. */
2236 + unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
2237 +
2238 + struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
2239 +
2240 + unsigned long pfn;
2241 +
2242 +	/* Invalid RAR handle, or requested size does not fit in the RAR. */
2243 +	if (rar == NULL
2244 +	    || handle == 0
2245 +	    || size > rar->length - (handle - (unsigned long) rar->iobase))
2246 + return -EINVAL;
2247 +
2248 + /*
2249 + * Retrieve physical address corresponding to the RAR handle,
2250 + * and convert it to a page frame.
2251 + */
2252 + pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
2253 +
2254 +
2255 + pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
2256 + handle,
2257 + handle + size);
2258 +
2259 + /*
2260 + * Map RAR memory into user space. This is really only useful
2261 + * for debugging purposes since the memory won't be
2262 +	 * accessible, i.e. reads return zero and writes are ignored,
2263 + * when it is locked down.
2264 + */
2265 + if (remap_pfn_range(vma,
2266 + vma->vm_start,
2267 + pfn,
2268 + size,
2269 + vma->vm_page_prot))
2270 + return -EAGAIN;
2271 +
2272 + /* vma->vm_ops = &memrar_mem_ops; */
2273 +
2274 + return 0;
2275 +}
2276 +
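+/*
+ * Illustrative user-space sketch (not part of this patch): mapping a
+ * reserved block by passing its RAR handle as the mmap() offset, as
+ * described above.  "fd" and "req" are the descriptor and the
+ * RAR_block_info filled in by a successful RAR_HANDLER_RESERVE ioctl
+ * in the previous sketch.  This is mostly useful for debugging, since
+ * a locked-down RAR reads back as zero and ignores writes.
+ *
+ *	void *p = mmap(NULL, req.size, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, (off_t) req.handle);
+ *	if (p != MAP_FAILED)
+ *		munmap(p, req.size);
+ */
+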
2277 +static int memrar_open(struct inode *inode, struct file *filp)
2278 +{
2279 + /* Nothing to do yet. */
2280 +
2281 + return 0;
2282 +}
2283 +
2284 +static int memrar_release(struct inode *inode, struct file *filp)
2285 +{
2286 + /* Free all regions associated with the given file handle. */
2287 +
2288 + struct memrar_buffer_info *pos;
2289 + struct memrar_buffer_info *tmp;
2290 + int z;
2291 +
2292 + for (z = 0; z != MRST_NUM_RAR; ++z) {
2293 + struct memrar_rar_info * const rar = &memrars[z];
2294 +
2295 + mutex_lock(&rar->lock);
2296 +
2297 + list_for_each_entry_safe(pos,
2298 + tmp,
2299 + &rar->buffers.list,
2300 + list) {
2301 + if (filp == pos->owner)
2302 + kref_put(&pos->refcount,
2303 + memrar_release_block_i);
2304 + }
2305 +
2306 + mutex_unlock(&rar->lock);
2307 + }
2308 +
2309 + return 0;
2310 +}
2311 +
2312 +/*
2313 + * @note This function is part of the kernel space memrar driver API.
2314 + */
2315 +size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
2316 +{
2317 + struct RAR_buffer * const end =
2318 + (buffers == NULL ? buffers : buffers + count);
2319 + struct RAR_buffer *i;
2320 +
2321 + size_t reserve_count = 0;
2322 +
2323 + for (i = buffers; i != end; ++i) {
2324 + if (memrar_reserve_block(i, NULL) == 0)
2325 + ++reserve_count;
2326 + else
2327 + i->bus_address = 0;
2328 + }
2329 +
2330 + return reserve_count;
2331 +}
2332 +EXPORT_SYMBOL(rar_reserve);
2333 +
2334 +/*
2335 + * @note This function is part of the kernel space memrar driver API.
2336 + */
2337 +size_t rar_release(struct RAR_buffer *buffers, size_t count)
2338 +{
2339 + struct RAR_buffer * const end =
2340 + (buffers == NULL ? buffers : buffers + count);
2341 + struct RAR_buffer *i;
2342 +
2343 + size_t release_count = 0;
2344 +
2345 + for (i = buffers; i != end; ++i) {
2346 + u32 * const handle = &i->info.handle;
2347 + if (memrar_release_block(*handle) == 0) {
2348 + /*
2349 + * @todo We assume we should do this each time
2350 + * the ref count is decremented. Should
2351 + * we instead only do this when the ref
2352 + * count has dropped to zero, and the
2353 + * buffer has been completely
2354 + * released/unmapped?
2355 + */
2356 + *handle = 0;
2357 + ++release_count;
2358 + }
2359 + }
2360 +
2361 + return release_count;
2362 +}
2363 +EXPORT_SYMBOL(rar_release);
2364 +
2365 +/*
2366 + * @note This function is part of the kernel space driver API.
2367 + */
2368 +size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
2369 +{
2370 + struct RAR_buffer * const end =
2371 + (buffers == NULL ? buffers : buffers + count);
2372 + struct RAR_buffer *i;
2373 + struct memrar_buffer_info *pos;
2374 +
2375 + size_t conversion_count = 0;
2376 +
2377 + /*
2378 + * Find all bus addresses corresponding to the given handles.
2379 + *
2380 + * @todo Not liking this nested loop. Optimize.
2381 + */
2382 + for (i = buffers; i != end; ++i) {
2383 + struct memrar_rar_info * const rar =
2384 + memrar_get_rar_info(i->info.handle);
2385 +
2386 + /*
2387 + * Check if we have a bogus handle, and then continue
2388 + * with remaining buffers.
2389 + */
2390 + if (rar == NULL) {
2391 + i->bus_address = 0;
2392 + continue;
2393 + }
2394 +
2395 + mutex_lock(&rar->lock);
2396 +
2397 + list_for_each_entry(pos, &rar->buffers.list, list) {
2398 + struct RAR_block_info * const user_info =
2399 + &pos->buffer.info;
2400 +
2401 + if (i->info.handle >= user_info->handle
2402 + && i->info.handle < (user_info->handle
2403 + + user_info->size)) {
2404 + u32 const offset =
2405 + i->info.handle - user_info->handle;
2406 +
2407 + i->info.type = user_info->type;
2408 + i->info.size = user_info->size - offset;
2409 + i->bus_address =
2410 + pos->buffer.bus_address
2411 + + offset;
2412 +
2413 + /* Increment the reference count. */
2414 + kref_get(&pos->refcount);
2415 +
2416 + ++conversion_count;
2417 + break;
2418 + } else {
2419 + i->bus_address = 0;
2420 + }
2421 + }
2422 +
2423 + mutex_unlock(&rar->lock);
2424 + }
2425 +
2426 + return conversion_count;
2427 +}
2428 +EXPORT_SYMBOL(rar_handle_to_bus);
2429 +
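+/*
+ * Illustrative kernel-space sketch (not part of this patch): how
+ * another driver might use the exported rar_reserve()/rar_release()
+ * API above.  The RAR_TYPE_* constants and the struct RAR_buffer
+ * layout are assumed to come from the memrar header; error handling
+ * is abbreviated.
+ *
+ *	struct RAR_buffer buf = {
+ *		.info = {
+ *			.type = RAR_TYPE_AUDIO,
+ *			.size = 16 * 1024,
+ *		},
+ *	};
+ *
+ *	if (rar_reserve(&buf, 1) == 1) {
+ *		// buf.info.handle and buf.bus_address are now valid
+ *		// and may be handed to hardware that consumes RAR bus
+ *		// addresses.
+ *		rar_release(&buf, 1);
+ *	}
+ */
+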
2430 +static const struct file_operations memrar_fops = {
2431 + .owner = THIS_MODULE,
2432 + .unlocked_ioctl = memrar_ioctl,
2433 + .mmap = memrar_mmap,
2434 + .open = memrar_open,
2435 + .release = memrar_release,
2436 +};
2437 +
2438 +static struct miscdevice memrar_miscdev = {
2439 + .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
2440 + .name = "memrar", /* /dev/memrar */
2441 + .fops = &memrar_fops
2442 +};
2443 +
2444 +static char const banner[] __initdata =
2445 + KERN_INFO
2446 + "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
2447 +
2448 +static int __init memrar_init(void)
2449 +{
2450 + int result = 0;
2451 +
2452 + printk(banner);
2453 +
2454 + /*
2455 + * We initialize the RAR parameters early on so that we can
2456 + * discontinue memrar device initialization and registration
2457 + * if suitably configured RARs are not available.
2458 + */
2459 + result = memrar_init_rar_resources(memrar_miscdev.name);
2460 +
2461 + if (result != 0)
2462 + return result;
2463 +
2464 + result = misc_register(&memrar_miscdev);
2465 +
2466 + if (result != 0) {
2467 + pr_err("%s: misc_register() failed.\n",
2468 + memrar_miscdev.name);
2469 +
2470 + /* Clean up resources previously reserved. */
2471 + memrar_fini_rar_resources();
2472 + }
2473 +
2474 + return result;
2475 +}
2476 +
2477 +static void __exit memrar_exit(void)
2478 +{
2479 + memrar_fini_rar_resources();
2480 +
2481 + misc_deregister(&memrar_miscdev);
2482 +}
2483 +
2484 +#ifndef MODULE
2485 +/*
2486 + * The RAR handler must be initialized after the RAR register driver.
2487 + * Otherwise the RAR handler will always assume no RAR support
2488 + * exists.
2489 + */
2490 +late_initcall_sync(memrar_init);
2491 +#else
2492 +module_init(memrar_init);
2493 +#endif /* MODULE */
2494 +
2495 +module_exit(memrar_exit);
2496 +
2497 +
2498 +MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
2499 +MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
2500 +MODULE_LICENSE("GPL");
2501 +MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
2502 +MODULE_VERSION(MEMRAR_VER);
2503 +
2504 +
2505 +
2506 +/*
2507 + Local Variables:
2508 + c-file-style: "linux"
2509 + End:
2510 +*/
2511 Index: linux-2.6.33/drivers/staging/rar/Kconfig
2512 ===================================================================
2513 --- linux-2.6.33.orig/drivers/staging/rar/Kconfig
2514 +++ linux-2.6.33/drivers/staging/rar/Kconfig
2515 @@ -6,7 +6,7 @@ menu "RAR Register Driver"
2516 #
2517 # Restricted Access Register Manager
2518 #
2519 -config RAR_REGISTER
2520 +config RAR_DRIVER
2521 tristate "Restricted Access Region Register Driver"
2522 default n
2523 ---help---
2524 Index: linux-2.6.33/drivers/staging/rar/Makefile
2525 ===================================================================
2526 --- linux-2.6.33.orig/drivers/staging/rar/Makefile
2527 +++ linux-2.6.33/drivers/staging/rar/Makefile
2528 @@ -1,2 +1,2 @@
2529 EXTRA_CFLAGS += -DLITTLE__ENDIAN
2530 -obj-$(CONFIG_RAR_REGISTER) += rar_driver.o
2531 +obj-$(CONFIG_RAR_DRIVER) += rar_driver.o