1 /*
2 * drivers/pci/pci-sysfs.c
3 *
4 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
5 * (C) Copyright 2002-2004 IBM Corp.
6 * (C) Copyright 2003 Matthew Wilcox
7 * (C) Copyright 2003 Hewlett-Packard
8 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
9 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
10 *
11 * File attributes for PCI devices
12 *
13 * Modeled after usb's driverfs.c
14 *
15 */
16
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/pci.h>
21 #include <linux/stat.h>
22 #include <linux/export.h>
23 #include <linux/topology.h>
24 #include <linux/mm.h>
25 #include <linux/fs.h>
26 #include <linux/capability.h>
27 #include <linux/security.h>
28 #include <linux/pci-aspm.h>
29 #include <linux/slab.h>
30 #include <linux/vgaarb.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/of.h>
33 #include "pci.h"
34
35 static int sysfs_initialized; /* = 0 */
36
37 /* show configuration fields */
38 #define pci_config_attr(field, format_string) \
39 static ssize_t \
40 field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
41 { \
42 struct pci_dev *pdev; \
43 \
44 pdev = to_pci_dev(dev); \
45 return sprintf(buf, format_string, pdev->field); \
46 } \
47 static DEVICE_ATTR_RO(field)
48
49 pci_config_attr(vendor, "0x%04x\n");
50 pci_config_attr(device, "0x%04x\n");
51 pci_config_attr(subsystem_vendor, "0x%04x\n");
52 pci_config_attr(subsystem_device, "0x%04x\n");
53 pci_config_attr(class, "0x%06x\n");
54 pci_config_attr(irq, "%u\n");
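/*
 * Illustration (not part of the original source): each pci_config_attr()
 * instance above expands to a <field>_show() routine plus a read-only device
 * attribute, so a hypothetical device 0000:00:1f.2 would get files such as
 *
 *   /sys/bus/pci/devices/0000:00:1f.2/vendor  ->  "0x8086\n"
 *   /sys/bus/pci/devices/0000:00:1f.2/class   ->  "0x010601\n"
 *
 * (the BDF and the values shown are made-up examples).
 */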
55
56 static ssize_t broken_parity_status_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59 {
60 struct pci_dev *pdev = to_pci_dev(dev);
61 return sprintf(buf, "%u\n", pdev->broken_parity_status);
62 }
63
64 static ssize_t broken_parity_status_store(struct device *dev,
65 struct device_attribute *attr,
66 const char *buf, size_t count)
67 {
68 struct pci_dev *pdev = to_pci_dev(dev);
69 unsigned long val;
70
71 if (kstrtoul(buf, 0, &val) < 0)
72 return -EINVAL;
73
74 pdev->broken_parity_status = !!val;
75
76 return count;
77 }
78 static DEVICE_ATTR_RW(broken_parity_status);
79
80 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
81 struct device_attribute *attr, char *buf)
82 {
83 const struct cpumask *mask;
84
85 #ifdef CONFIG_NUMA
86 mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
87 cpumask_of_node(dev_to_node(dev));
88 #else
89 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
90 #endif
91 return cpumap_print_to_pagebuf(list, buf, mask);
92 }
93
94 static ssize_t local_cpus_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96 {
97 return pci_dev_show_local_cpu(dev, false, attr, buf);
98 }
99 static DEVICE_ATTR_RO(local_cpus);
100
101 static ssize_t local_cpulist_show(struct device *dev,
102 struct device_attribute *attr, char *buf)
103 {
104 return pci_dev_show_local_cpu(dev, true, attr, buf);
105 }
106 static DEVICE_ATTR_RO(local_cpulist);
107
108 /*
109 * PCI Bus Class Devices
110 */
111 static ssize_t cpuaffinity_show(struct device *dev,
112 struct device_attribute *attr, char *buf)
113 {
114 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
115
116 return cpumap_print_to_pagebuf(false, buf, cpumask);
117 }
118 static DEVICE_ATTR_RO(cpuaffinity);
119
120 static ssize_t cpulistaffinity_show(struct device *dev,
121 struct device_attribute *attr, char *buf)
122 {
123 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
124
125 return cpumap_print_to_pagebuf(true, buf, cpumask);
126 }
127 static DEVICE_ATTR_RO(cpulistaffinity);
128
129 /* show resources */
130 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
131 char *buf)
132 {
133 struct pci_dev *pci_dev = to_pci_dev(dev);
134 char *str = buf;
135 int i;
136 int max;
137 resource_size_t start, end;
138
139 if (pci_dev->subordinate)
140 max = DEVICE_COUNT_RESOURCE;
141 else
142 max = PCI_BRIDGE_RESOURCES;
143
144 for (i = 0; i < max; i++) {
145 struct resource *res = &pci_dev->resource[i];
146 pci_resource_to_user(pci_dev, i, res, &start, &end);
147 str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
148 (unsigned long long)start,
149 (unsigned long long)end,
150 (unsigned long long)res->flags);
151 }
152 return (str - buf);
153 }
154 static DEVICE_ATTR_RO(resource);
155
156 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
157 char *buf)
158 {
159 struct pci_dev *pci_dev = to_pci_dev(dev);
160
161 return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
162 pci_dev->vendor, pci_dev->device,
163 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
164 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
165 (u8)(pci_dev->class));
166 }
167 static DEVICE_ATTR_RO(modalias);
168
169 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
170 const char *buf, size_t count)
171 {
172 struct pci_dev *pdev = to_pci_dev(dev);
173 unsigned long val;
174 ssize_t result = kstrtoul(buf, 0, &val);
175
176 if (result < 0)
177 return result;
178
179 /* this can crash the machine when done on the "wrong" device */
180 if (!capable(CAP_SYS_ADMIN))
181 return -EPERM;
182
183 if (!val) {
184 if (pci_is_enabled(pdev))
185 pci_disable_device(pdev);
186 else
187 result = -EIO;
188 } else
189 result = pci_enable_device(pdev);
190
191 return result < 0 ? result : count;
192 }
193
194 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
195 char *buf)
196 {
197 struct pci_dev *pdev;
198
199 pdev = to_pci_dev(dev);
200 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
201 }
202 static DEVICE_ATTR_RW(enable);
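/*
 * Example use of the "enable" attribute (illustrative): writing 1 calls
 * pci_enable_device() for the device and writing 0 calls pci_disable_device()
 * (writes require CAP_SYS_ADMIN); reading the file returns the current
 * enable_cnt reference count.
 */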
203
204 #ifdef CONFIG_NUMA
205 static ssize_t numa_node_store(struct device *dev,
206 struct device_attribute *attr, const char *buf,
207 size_t count)
208 {
209 struct pci_dev *pdev = to_pci_dev(dev);
210 int node, ret;
211
212 if (!capable(CAP_SYS_ADMIN))
213 return -EPERM;
214
215 ret = kstrtoint(buf, 0, &node);
216 if (ret)
217 return ret;
218
219 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
220 return -EINVAL;
221
222 if (node != NUMA_NO_NODE && !node_online(node))
223 return -EINVAL;
224
225 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
226 dev_alert(&pdev->dev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
227 node);
228
229 dev->numa_node = node;
230 return count;
231 }
232
233 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
234 char *buf)
235 {
236 return sprintf(buf, "%d\n", dev->numa_node);
237 }
238 static DEVICE_ATTR_RW(numa_node);
239 #endif
240
241 static ssize_t dma_mask_bits_show(struct device *dev,
242 struct device_attribute *attr, char *buf)
243 {
244 struct pci_dev *pdev = to_pci_dev(dev);
245
246 return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
247 }
248 static DEVICE_ATTR_RO(dma_mask_bits);
249
250 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
251 struct device_attribute *attr,
252 char *buf)
253 {
254 return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
255 }
256 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
257
258 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
259 char *buf)
260 {
261 struct pci_dev *pdev = to_pci_dev(dev);
262 struct pci_bus *subordinate = pdev->subordinate;
263
264 return sprintf(buf, "%u\n", subordinate ?
265 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
266 : !pdev->no_msi);
267 }
268
269 static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
270 const char *buf, size_t count)
271 {
272 struct pci_dev *pdev = to_pci_dev(dev);
273 struct pci_bus *subordinate = pdev->subordinate;
274 unsigned long val;
275
276 if (kstrtoul(buf, 0, &val) < 0)
277 return -EINVAL;
278
279 if (!capable(CAP_SYS_ADMIN))
280 return -EPERM;
281
282 /*
283 * "no_msi" and "bus_flags" only affect what happens when a driver
284 * requests MSI or MSI-X. They don't affect any drivers that have
285 * already requested MSI or MSI-X.
286 */
287 if (!subordinate) {
288 pdev->no_msi = !val;
289 dev_info(&pdev->dev, "MSI/MSI-X %s for future drivers\n",
290 val ? "allowed" : "disallowed");
291 return count;
292 }
293
294 if (val)
295 subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
296 else
297 subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
298
299 dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
300 val ? "allowed" : "disallowed");
301 return count;
302 }
303 static DEVICE_ATTR_RW(msi_bus);
304
305 static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
306 size_t count)
307 {
308 unsigned long val;
309 struct pci_bus *b = NULL;
310
311 if (kstrtoul(buf, 0, &val) < 0)
312 return -EINVAL;
313
314 if (val) {
315 pci_lock_rescan_remove();
316 while ((b = pci_find_next_bus(b)) != NULL)
317 pci_rescan_bus(b);
318 pci_unlock_rescan_remove();
319 }
320 return count;
321 }
322 static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
323
324 static struct attribute *pci_bus_attrs[] = {
325 &bus_attr_rescan.attr,
326 NULL,
327 };
328
329 static const struct attribute_group pci_bus_group = {
330 .attrs = pci_bus_attrs,
331 };
332
333 const struct attribute_group *pci_bus_groups[] = {
334 &pci_bus_group,
335 NULL,
336 };
337
338 static ssize_t dev_rescan_store(struct device *dev,
339 struct device_attribute *attr, const char *buf,
340 size_t count)
341 {
342 unsigned long val;
343 struct pci_dev *pdev = to_pci_dev(dev);
344
345 if (kstrtoul(buf, 0, &val) < 0)
346 return -EINVAL;
347
348 if (val) {
349 pci_lock_rescan_remove();
350 pci_rescan_bus(pdev->bus);
351 pci_unlock_rescan_remove();
352 }
353 return count;
354 }
355 static struct device_attribute dev_rescan_attr = __ATTR(rescan,
356 (S_IWUSR|S_IWGRP),
357 NULL, dev_rescan_store);
358
359 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
360 const char *buf, size_t count)
361 {
362 unsigned long val;
363
364 if (kstrtoul(buf, 0, &val) < 0)
365 return -EINVAL;
366
367 if (val && device_remove_file_self(dev, attr))
368 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
369 return count;
370 }
371 static struct device_attribute dev_remove_attr = __ATTR(remove,
372 (S_IWUSR|S_IWGRP),
373 NULL, remove_store);
374
375 static ssize_t dev_bus_rescan_store(struct device *dev,
376 struct device_attribute *attr,
377 const char *buf, size_t count)
378 {
379 unsigned long val;
380 struct pci_bus *bus = to_pci_bus(dev);
381
382 if (kstrtoul(buf, 0, &val) < 0)
383 return -EINVAL;
384
385 if (val) {
386 pci_lock_rescan_remove();
387 if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
388 pci_rescan_bus_bridge_resize(bus->self);
389 else
390 pci_rescan_bus(bus);
391 pci_unlock_rescan_remove();
392 }
393 return count;
394 }
395 static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
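/*
 * Summary of the hotplug-style knobs defined above (illustrative paths;
 * <BDF> stands for a domain:bus:device.function such as 0000:03:00.0):
 *
 *   echo 1 > /sys/bus/pci/rescan                 rescan all buses (bus_rescan_store)
 *   echo 1 > /sys/bus/pci/devices/<BDF>/rescan   rescan the device's bus (dev_rescan_store)
 *   echo 1 > /sys/bus/pci/devices/<BDF>/remove   stop and remove the device (remove_store)
 *
 * dev_bus_rescan_store() backs the per-bus "rescan" file that is wired up in
 * pcibus_attrs[] further down.
 */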
396
397 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
398 static ssize_t d3cold_allowed_store(struct device *dev,
399 struct device_attribute *attr,
400 const char *buf, size_t count)
401 {
402 struct pci_dev *pdev = to_pci_dev(dev);
403 unsigned long val;
404
405 if (kstrtoul(buf, 0, &val) < 0)
406 return -EINVAL;
407
408 pdev->d3cold_allowed = !!val;
409 pm_runtime_resume(dev);
410
411 return count;
412 }
413
414 static ssize_t d3cold_allowed_show(struct device *dev,
415 struct device_attribute *attr, char *buf)
416 {
417 struct pci_dev *pdev = to_pci_dev(dev);
418 return sprintf(buf, "%u\n", pdev->d3cold_allowed);
419 }
420 static DEVICE_ATTR_RW(d3cold_allowed);
421 #endif
422
423 #ifdef CONFIG_OF
424 static ssize_t devspec_show(struct device *dev,
425 struct device_attribute *attr, char *buf)
426 {
427 struct pci_dev *pdev = to_pci_dev(dev);
428 struct device_node *np = pci_device_to_OF_node(pdev);
429
430 if (np == NULL || np->full_name == NULL)
431 return 0;
432 return sprintf(buf, "%s", np->full_name);
433 }
434 static DEVICE_ATTR_RO(devspec);
435 #endif
436
437 #ifdef CONFIG_PCI_IOV
438 static ssize_t sriov_totalvfs_show(struct device *dev,
439 struct device_attribute *attr,
440 char *buf)
441 {
442 struct pci_dev *pdev = to_pci_dev(dev);
443
444 return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
445 }
446
447
448 static ssize_t sriov_numvfs_show(struct device *dev,
449 struct device_attribute *attr,
450 char *buf)
451 {
452 struct pci_dev *pdev = to_pci_dev(dev);
453
454 return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
455 }
456
457 /*
458 * num_vfs > 0; number of VFs to enable
459 * num_vfs = 0; disable all VFs
460 *
461 * Note: SRIOV spec doesn't allow partial VF
462 * disable, so it's all or none.
463 */
464 static ssize_t sriov_numvfs_store(struct device *dev,
465 struct device_attribute *attr,
466 const char *buf, size_t count)
467 {
468 struct pci_dev *pdev = to_pci_dev(dev);
469 int ret;
470 u16 num_vfs;
471
472 ret = kstrtou16(buf, 0, &num_vfs);
473 if (ret < 0)
474 return ret;
475
476 if (num_vfs > pci_sriov_get_totalvfs(pdev))
477 return -ERANGE;
478
479 if (num_vfs == pdev->sriov->num_VFs)
480 return count; /* no change */
481
482 /* is the PF driver loaded, and does it provide the sriov_configure callback? */
483 if (!pdev->driver || !pdev->driver->sriov_configure) {
484 dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
485 return -ENOSYS;
486 }
487
488 if (num_vfs == 0) {
489 /* disable VFs */
490 ret = pdev->driver->sriov_configure(pdev, 0);
491 if (ret < 0)
492 return ret;
493 return count;
494 }
495
496 /* enable VFs */
497 if (pdev->sriov->num_VFs) {
498 dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
499 pdev->sriov->num_VFs, num_vfs);
500 return -EBUSY;
501 }
502
503 ret = pdev->driver->sriov_configure(pdev, num_vfs);
504 if (ret < 0)
505 return ret;
506
507 if (ret != num_vfs)
508 dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
509 num_vfs, ret);
510
511 return count;
512 }
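/*
 * Illustrative use from user space (the path and BDF are examples only; the
 * PF's driver must implement ->sriov_configure for this to succeed):
 *
 *   # echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   enable 4 VFs
 *   # echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   disable all VFs
 *
 * Per the note above, changing 4 VFs directly to 2 is rejected with -EBUSY;
 * the VFs must be disabled first.
 */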
513
514 static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
515 static struct device_attribute sriov_numvfs_attr =
516 __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
517 sriov_numvfs_show, sriov_numvfs_store);
518 #endif /* CONFIG_PCI_IOV */
519
520 static ssize_t driver_override_store(struct device *dev,
521 struct device_attribute *attr,
522 const char *buf, size_t count)
523 {
524 struct pci_dev *pdev = to_pci_dev(dev);
525 char *driver_override, *old = pdev->driver_override, *cp;
526
527 /* We need to keep extra room for a newline */
528 if (count >= (PAGE_SIZE - 1))
529 return -EINVAL;
530
531 driver_override = kstrndup(buf, count, GFP_KERNEL);
532 if (!driver_override)
533 return -ENOMEM;
534
535 cp = strchr(driver_override, '\n');
536 if (cp)
537 *cp = '\0';
538
539 if (strlen(driver_override)) {
540 pdev->driver_override = driver_override;
541 } else {
542 kfree(driver_override);
543 pdev->driver_override = NULL;
544 }
545
546 kfree(old);
547
548 return count;
549 }
550
551 static ssize_t driver_override_show(struct device *dev,
552 struct device_attribute *attr, char *buf)
553 {
554 struct pci_dev *pdev = to_pci_dev(dev);
555
556 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
557 }
558 static DEVICE_ATTR_RW(driver_override);
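/*
 * Example sequence (illustrative only) for steering a device to a specific
 * driver via driver_override, e.g. handing a device to vfio-pci:
 *
 *   # echo vfio-pci > /sys/bus/pci/devices/0000:06:00.0/driver_override
 *   # echo 0000:06:00.0 > /sys/bus/pci/devices/0000:06:00.0/driver/unbind
 *   # echo 0000:06:00.0 > /sys/bus/pci/drivers_probe
 *
 * Writing an empty string (or just a newline) clears the override.
 */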
559
560 static struct attribute *pci_dev_attrs[] = {
561 &dev_attr_resource.attr,
562 &dev_attr_vendor.attr,
563 &dev_attr_device.attr,
564 &dev_attr_subsystem_vendor.attr,
565 &dev_attr_subsystem_device.attr,
566 &dev_attr_class.attr,
567 &dev_attr_irq.attr,
568 &dev_attr_local_cpus.attr,
569 &dev_attr_local_cpulist.attr,
570 &dev_attr_modalias.attr,
571 #ifdef CONFIG_NUMA
572 &dev_attr_numa_node.attr,
573 #endif
574 &dev_attr_dma_mask_bits.attr,
575 &dev_attr_consistent_dma_mask_bits.attr,
576 &dev_attr_enable.attr,
577 &dev_attr_broken_parity_status.attr,
578 &dev_attr_msi_bus.attr,
579 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
580 &dev_attr_d3cold_allowed.attr,
581 #endif
582 #ifdef CONFIG_OF
583 &dev_attr_devspec.attr,
584 #endif
585 &dev_attr_driver_override.attr,
586 NULL,
587 };
588
589 static const struct attribute_group pci_dev_group = {
590 .attrs = pci_dev_attrs,
591 };
592
593 const struct attribute_group *pci_dev_groups[] = {
594 &pci_dev_group,
595 NULL,
596 };
597
598 static struct attribute *pcibus_attrs[] = {
599 &dev_attr_rescan.attr,
600 &dev_attr_cpuaffinity.attr,
601 &dev_attr_cpulistaffinity.attr,
602 NULL,
603 };
604
605 static const struct attribute_group pcibus_group = {
606 .attrs = pcibus_attrs,
607 };
608
609 const struct attribute_group *pcibus_groups[] = {
610 &pcibus_group,
611 NULL,
612 };
613
614 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
615 char *buf)
616 {
617 struct pci_dev *pdev = to_pci_dev(dev);
618 struct pci_dev *vga_dev = vga_default_device();
619
620 if (vga_dev)
621 return sprintf(buf, "%u\n", (pdev == vga_dev));
622
623 return sprintf(buf, "%u\n",
624 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
625 IORESOURCE_ROM_SHADOW));
626 }
627 static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
628
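/*
 * pci_read_config() and pci_write_config() below cope with arbitrary offsets
 * and lengths by splitting the transfer into byte, word and dword config
 * accesses. As a worked example (illustration only), a 7-byte read starting
 * at offset 1 is issued as one byte access at 1, one word access at 2 and one
 * dword access at 4.
 */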
629 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
630 struct bin_attribute *bin_attr, char *buf,
631 loff_t off, size_t count)
632 {
633 struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
634 kobj));
635 unsigned int size = 64;
636 loff_t init_off = off;
637 u8 *data = (u8 *) buf;
638
639 /* Several chips lock up trying to read undefined config space */
640 if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
641 size = dev->cfg_size;
642 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
643 size = 128;
644
645 if (off > size)
646 return 0;
647 if (off + count > size) {
648 size -= off;
649 count = size;
650 } else {
651 size = count;
652 }
653
654 pci_config_pm_runtime_get(dev);
655
656 if ((off & 1) && size) {
657 u8 val;
658 pci_user_read_config_byte(dev, off, &val);
659 data[off - init_off] = val;
660 off++;
661 size--;
662 }
663
664 if ((off & 3) && size > 2) {
665 u16 val;
666 pci_user_read_config_word(dev, off, &val);
667 data[off - init_off] = val & 0xff;
668 data[off - init_off + 1] = (val >> 8) & 0xff;
669 off += 2;
670 size -= 2;
671 }
672
673 while (size > 3) {
674 u32 val;
675 pci_user_read_config_dword(dev, off, &val);
676 data[off - init_off] = val & 0xff;
677 data[off - init_off + 1] = (val >> 8) & 0xff;
678 data[off - init_off + 2] = (val >> 16) & 0xff;
679 data[off - init_off + 3] = (val >> 24) & 0xff;
680 off += 4;
681 size -= 4;
682 }
683
684 if (size >= 2) {
685 u16 val;
686 pci_user_read_config_word(dev, off, &val);
687 data[off - init_off] = val & 0xff;
688 data[off - init_off + 1] = (val >> 8) & 0xff;
689 off += 2;
690 size -= 2;
691 }
692
693 if (size > 0) {
694 u8 val;
695 pci_user_read_config_byte(dev, off, &val);
696 data[off - init_off] = val;
697 off++;
698 --size;
699 }
700
701 pci_config_pm_runtime_put(dev);
702
703 return count;
704 }
705
706 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
707 struct bin_attribute *bin_attr, char *buf,
708 loff_t off, size_t count)
709 {
710 struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
711 kobj));
712 unsigned int size = count;
713 loff_t init_off = off;
714 u8 *data = (u8 *) buf;
715
716 if (off > dev->cfg_size)
717 return 0;
718 if (off + count > dev->cfg_size) {
719 size = dev->cfg_size - off;
720 count = size;
721 }
722
723 pci_config_pm_runtime_get(dev);
724
725 if ((off & 1) && size) {
726 pci_user_write_config_byte(dev, off, data[off - init_off]);
727 off++;
728 size--;
729 }
730
731 if ((off & 3) && size > 2) {
732 u16 val = data[off - init_off];
733 val |= (u16) data[off - init_off + 1] << 8;
734 pci_user_write_config_word(dev, off, val);
735 off += 2;
736 size -= 2;
737 }
738
739 while (size > 3) {
740 u32 val = data[off - init_off];
741 val |= (u32) data[off - init_off + 1] << 8;
742 val |= (u32) data[off - init_off + 2] << 16;
743 val |= (u32) data[off - init_off + 3] << 24;
744 pci_user_write_config_dword(dev, off, val);
745 off += 4;
746 size -= 4;
747 }
748
749 if (size >= 2) {
750 u16 val = data[off - init_off];
751 val |= (u16) data[off - init_off + 1] << 8;
752 pci_user_write_config_word(dev, off, val);
753 off += 2;
754 size -= 2;
755 }
756
757 if (size) {
758 pci_user_write_config_byte(dev, off, data[off - init_off]);
759 off++;
760 --size;
761 }
762
763 pci_config_pm_runtime_put(dev);
764
765 return count;
766 }
767
768 static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
769 struct bin_attribute *bin_attr, char *buf,
770 loff_t off, size_t count)
771 {
772 struct pci_dev *dev =
773 to_pci_dev(container_of(kobj, struct device, kobj));
774
775 if (off > bin_attr->size)
776 count = 0;
777 else if (count > bin_attr->size - off)
778 count = bin_attr->size - off;
779
780 return pci_read_vpd(dev, off, count, buf);
781 }
782
783 static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
784 struct bin_attribute *bin_attr, char *buf,
785 loff_t off, size_t count)
786 {
787 struct pci_dev *dev =
788 to_pci_dev(container_of(kobj, struct device, kobj));
789
790 if (off > bin_attr->size)
791 count = 0;
792 else if (count > bin_attr->size - off)
793 count = bin_attr->size - off;
794
795 return pci_write_vpd(dev, off, count, buf);
796 }
797
798 #ifdef HAVE_PCI_LEGACY
799 /**
800 * pci_read_legacy_io - read byte(s) from legacy I/O port space
801 * @filp: open sysfs file
802 * @kobj: kobject corresponding to file to read from
803 * @bin_attr: struct bin_attribute for this file
804 * @buf: buffer to store results
805 * @off: offset into legacy I/O port space
806 * @count: number of bytes to read
807 *
808 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
809 * callback routine (pci_legacy_read).
810 */
811 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
812 struct bin_attribute *bin_attr, char *buf,
813 loff_t off, size_t count)
814 {
815 struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
816 kobj));
817
818 /* Only support 1, 2 or 4 byte accesses */
819 if (count != 1 && count != 2 && count != 4)
820 return -EINVAL;
821
822 return pci_legacy_read(bus, off, (u32 *)buf, count);
823 }
824
825 /**
826 * pci_write_legacy_io - write byte(s) to legacy I/O port space
827 * @filp: open sysfs file
828  * @kobj: kobject corresponding to the file to write to
829 * @bin_attr: struct bin_attribute for this file
830 * @buf: buffer containing value to be written
831 * @off: offset into legacy I/O port space
832 * @count: number of bytes to write
833 *
834  * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
835 * callback routine (pci_legacy_write).
836 */
837 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
838 struct bin_attribute *bin_attr, char *buf,
839 loff_t off, size_t count)
840 {
841 struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
842 kobj));
843
844 /* Only support 1, 2 or 4 byte accesses */
845 if (count != 1 && count != 2 && count != 4)
846 return -EINVAL;
847
848 return pci_legacy_write(bus, off, *(u32 *)buf, count);
849 }
850
851 /**
852 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
853 * @filp: open sysfs file
854 * @kobj: kobject corresponding to device to be mapped
855 * @attr: struct bin_attribute for this file
856 * @vma: struct vm_area_struct passed to mmap
857 *
858 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
859 * legacy memory space (first meg of bus space) into application virtual
860 * memory space.
861 */
862 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
863 struct bin_attribute *attr,
864 struct vm_area_struct *vma)
865 {
866 struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
867 kobj));
868
869 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
870 }
871
872 /**
873 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
874 * @filp: open sysfs file
875 * @kobj: kobject corresponding to device to be mapped
876 * @attr: struct bin_attribute for this file
877 * @vma: struct vm_area_struct passed to mmap
878 *
879 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
880 * legacy IO space (first meg of bus space) into application virtual
881 * memory space. Returns -ENOSYS if the operation isn't supported
882 */
883 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
884 struct bin_attribute *attr,
885 struct vm_area_struct *vma)
886 {
887 struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
888 kobj));
889
890 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
891 }
892
893 /**
894 * pci_adjust_legacy_attr - adjustment of legacy file attributes
895 * @b: bus to create files under
896 * @mmap_type: I/O port or memory
897 *
898 * Stub implementation. Can be overridden by arch if necessary.
899 */
900 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
901 enum pci_mmap_state mmap_type)
902 {
903 }
904
905 /**
906 * pci_create_legacy_files - create legacy I/O port and memory files
907 * @b: bus to create files under
908 *
909 * Some platforms allow access to legacy I/O port and ISA memory space on
910 * a per-bus basis. This routine creates the files and ties them into
911 * their associated read, write and mmap files from pci-sysfs.c
912 *
913 * On error unwind, but don't propagate the error to the caller
914 * as it is ok to set up the PCI bus without these files.
915 */
916 void pci_create_legacy_files(struct pci_bus *b)
917 {
918 int error;
919
920 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
921 GFP_ATOMIC);
922 if (!b->legacy_io)
923 goto kzalloc_err;
924
925 sysfs_bin_attr_init(b->legacy_io);
926 b->legacy_io->attr.name = "legacy_io";
927 b->legacy_io->size = 0xffff;
928 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
929 b->legacy_io->read = pci_read_legacy_io;
930 b->legacy_io->write = pci_write_legacy_io;
931 b->legacy_io->mmap = pci_mmap_legacy_io;
932 pci_adjust_legacy_attr(b, pci_mmap_io);
933 error = device_create_bin_file(&b->dev, b->legacy_io);
934 if (error)
935 goto legacy_io_err;
936
937 /* Allocated above after the legacy_io struct */
938 b->legacy_mem = b->legacy_io + 1;
939 sysfs_bin_attr_init(b->legacy_mem);
940 b->legacy_mem->attr.name = "legacy_mem";
941 b->legacy_mem->size = 1024*1024;
942 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
943 b->legacy_mem->mmap = pci_mmap_legacy_mem;
944 pci_adjust_legacy_attr(b, pci_mmap_mem);
945 error = device_create_bin_file(&b->dev, b->legacy_mem);
946 if (error)
947 goto legacy_mem_err;
948
949 return;
950
951 legacy_mem_err:
952 device_remove_bin_file(&b->dev, b->legacy_io);
953 legacy_io_err:
954 kfree(b->legacy_io);
955 b->legacy_io = NULL;
956 kzalloc_err:
957 printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
958 return;
959 }
960
961 void pci_remove_legacy_files(struct pci_bus *b)
962 {
963 if (b->legacy_io) {
964 device_remove_bin_file(&b->dev, b->legacy_io);
965 device_remove_bin_file(&b->dev, b->legacy_mem);
966 kfree(b->legacy_io); /* both are allocated here */
967 }
968 }
969 #endif /* HAVE_PCI_LEGACY */
970
971 #ifdef HAVE_PCI_MMAP
972
973 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
974 enum pci_mmap_api mmap_api)
975 {
976 unsigned long nr, start, size, pci_start;
977
978 if (pci_resource_len(pdev, resno) == 0)
979 return 0;
980 nr = vma_pages(vma);
981 start = vma->vm_pgoff;
982 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
983 pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
984 pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
985 if (start >= pci_start && start < pci_start + size &&
986 start + nr <= pci_start + size)
987 return 1;
988 return 0;
989 }
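/*
 * Worked example for pci_mmap_fits() (illustrative, assuming 4 KiB pages):
 * a 64 KiB BAR spans 16 pages, and for a sysfs mmap pci_start is 0, so a
 * request with vm_pgoff == 0 covering up to 16 pages fits, while one asking
 * for 17 pages, or one starting at page 16, is rejected.
 */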
990
991 /**
992 * pci_mmap_resource - map a PCI resource into user memory space
993 * @kobj: kobject for mapping
994 * @attr: struct bin_attribute for the file being mapped
995 * @vma: struct vm_area_struct passed into the mmap
996 * @write_combine: 1 for write_combine mapping
997 *
998 * Use the regular PCI mapping routines to map a PCI resource into userspace.
999 */
1000 static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
1001 struct vm_area_struct *vma, int write_combine)
1002 {
1003 struct pci_dev *pdev = to_pci_dev(container_of(kobj,
1004 struct device, kobj));
1005 struct resource *res = attr->private;
1006 enum pci_mmap_state mmap_type;
1007 resource_size_t start, end;
1008 int i;
1009
1010 for (i = 0; i < PCI_ROM_RESOURCE; i++)
1011 if (res == &pdev->resource[i])
1012 break;
1013 if (i >= PCI_ROM_RESOURCE)
1014 return -ENODEV;
1015
1016 if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
1017 WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
1018 current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
1019 pci_name(pdev), i,
1020 (u64)pci_resource_start(pdev, i),
1021 (u64)pci_resource_len(pdev, i));
1022 return -EINVAL;
1023 }
1024
1025 /* pci_mmap_page_range() expects the same kind of entry as coming
1026 * from /proc/bus/pci/ which is a "user visible" value. If this is
1027 * different from the resource itself, arch will do necessary fixup.
1028 */
1029 pci_resource_to_user(pdev, i, res, &start, &end);
1030 vma->vm_pgoff += start >> PAGE_SHIFT;
1031 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1032
1033 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
1034 return -EINVAL;
1035
1036 return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
1037 }
1038
1039 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
1040 struct bin_attribute *attr,
1041 struct vm_area_struct *vma)
1042 {
1043 return pci_mmap_resource(kobj, attr, vma, 0);
1044 }
1045
1046 static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
1047 struct bin_attribute *attr,
1048 struct vm_area_struct *vma)
1049 {
1050 return pci_mmap_resource(kobj, attr, vma, 1);
1051 }
1052
1053 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
1054 struct bin_attribute *attr, char *buf,
1055 loff_t off, size_t count, bool write)
1056 {
1057 struct pci_dev *pdev = to_pci_dev(container_of(kobj,
1058 struct device, kobj));
1059 struct resource *res = attr->private;
1060 unsigned long port = off;
1061 int i;
1062
1063 for (i = 0; i < PCI_ROM_RESOURCE; i++)
1064 if (res == &pdev->resource[i])
1065 break;
1066 if (i >= PCI_ROM_RESOURCE)
1067 return -ENODEV;
1068
1069 port += pci_resource_start(pdev, i);
1070
1071 if (port > pci_resource_end(pdev, i))
1072 return 0;
1073
1074 if (port + count - 1 > pci_resource_end(pdev, i))
1075 return -EINVAL;
1076
1077 switch (count) {
1078 case 1:
1079 if (write)
1080 outb(*(u8 *)buf, port);
1081 else
1082 *(u8 *)buf = inb(port);
1083 return 1;
1084 case 2:
1085 if (write)
1086 outw(*(u16 *)buf, port);
1087 else
1088 *(u16 *)buf = inw(port);
1089 return 2;
1090 case 4:
1091 if (write)
1092 outl(*(u32 *)buf, port);
1093 else
1094 *(u32 *)buf = inl(port);
1095 return 4;
1096 }
1097 return -EINVAL;
1098 }
1099
1100 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
1101 struct bin_attribute *attr, char *buf,
1102 loff_t off, size_t count)
1103 {
1104 return pci_resource_io(filp, kobj, attr, buf, off, count, false);
1105 }
1106
1107 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1108 struct bin_attribute *attr, char *buf,
1109 loff_t off, size_t count)
1110 {
1111 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1112 }
1113
1114 /**
1115 * pci_remove_resource_files - cleanup resource files
1116 * @pdev: dev to cleanup
1117 *
1118 * If we created resource files for @pdev, remove them from sysfs and
1119 * free their resources.
1120 */
1121 static void pci_remove_resource_files(struct pci_dev *pdev)
1122 {
1123 int i;
1124
1125 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
1126 struct bin_attribute *res_attr;
1127
1128 res_attr = pdev->res_attr[i];
1129 if (res_attr) {
1130 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1131 kfree(res_attr);
1132 }
1133
1134 res_attr = pdev->res_attr_wc[i];
1135 if (res_attr) {
1136 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1137 kfree(res_attr);
1138 }
1139 }
1140 }
1141
1142 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1143 {
1144 /* allocate attribute structure, piggyback attribute name */
1145 int name_len = write_combine ? 13 : 10;
1146 struct bin_attribute *res_attr;
1147 int retval;
1148
1149 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1150 if (res_attr) {
1151 char *res_attr_name = (char *)(res_attr + 1);
1152
1153 sysfs_bin_attr_init(res_attr);
1154 if (write_combine) {
1155 pdev->res_attr_wc[num] = res_attr;
1156 sprintf(res_attr_name, "resource%d_wc", num);
1157 res_attr->mmap = pci_mmap_resource_wc;
1158 } else {
1159 pdev->res_attr[num] = res_attr;
1160 sprintf(res_attr_name, "resource%d", num);
1161 res_attr->mmap = pci_mmap_resource_uc;
1162 }
1163 if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1164 res_attr->read = pci_read_resource_io;
1165 res_attr->write = pci_write_resource_io;
1166 }
1167 res_attr->attr.name = res_attr_name;
1168 res_attr->attr.mode = S_IRUSR | S_IWUSR;
1169 res_attr->size = pci_resource_len(pdev, num);
1170 res_attr->private = &pdev->resource[num];
1171 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1172 } else
1173 retval = -ENOMEM;
1174
1175 return retval;
1176 }
1177
1178 /**
1179 * pci_create_resource_files - create resource files in sysfs for @dev
1180 * @pdev: dev in question
1181 *
1182 * Walk the resources in @pdev creating files for each resource available.
1183 */
1184 static int pci_create_resource_files(struct pci_dev *pdev)
1185 {
1186 int i;
1187 int retval;
1188
1189 /* Expose the PCI resources from this device as files */
1190 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
1191
1192 /* skip empty resources */
1193 if (!pci_resource_len(pdev, i))
1194 continue;
1195
1196 retval = pci_create_attr(pdev, i, 0);
1197 /* for prefetchable resources, create a WC mappable file */
1198 if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
1199 retval = pci_create_attr(pdev, i, 1);
1200
1201 if (retval) {
1202 pci_remove_resource_files(pdev);
1203 return retval;
1204 }
1205 }
1206 return 0;
1207 }
1208 #else /* !HAVE_PCI_MMAP */
1209 int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
1210 void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1211 #endif /* HAVE_PCI_MMAP */
1212
1213 /**
1214  * pci_write_rom - enable or disable read access to the PCI ROM file
1215 * @filp: sysfs file
1216 * @kobj: kernel object handle
1217 * @bin_attr: struct bin_attribute for this file
1218 * @buf: user input
1219 * @off: file offset
1220  * @count: number of bytes in input
1221 *
1222 * writing anything except 0 enables it
1223 */
1224 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1225 struct bin_attribute *bin_attr, char *buf,
1226 loff_t off, size_t count)
1227 {
1228 struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
1229
1230 if ((off == 0) && (*buf == '0') && (count == 2))
1231 pdev->rom_attr_enabled = 0;
1232 else
1233 pdev->rom_attr_enabled = 1;
1234
1235 return count;
1236 }
1237
1238 /**
1239 * pci_read_rom - read a PCI ROM
1240 * @filp: sysfs file
1241 * @kobj: kernel object handle
1242 * @bin_attr: struct bin_attribute for this file
1243 * @buf: where to put the data we read from the ROM
1244 * @off: file offset
1245 * @count: number of bytes to read
1246 *
1247 * Put @count bytes starting at @off into @buf from the ROM in the PCI
1248 * device corresponding to @kobj.
1249 */
1250 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1251 struct bin_attribute *bin_attr, char *buf,
1252 loff_t off, size_t count)
1253 {
1254 struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
1255 void __iomem *rom;
1256 size_t size;
1257
1258 if (!pdev->rom_attr_enabled)
1259 return -EINVAL;
1260
1261 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
1262 if (!rom || !size)
1263 return -EIO;
1264
1265 if (off >= size)
1266 count = 0;
1267 else {
1268 if (off + count > size)
1269 count = size - off;
1270
1271 memcpy_fromio(buf, rom + off, count);
1272 }
1273 pci_unmap_rom(pdev, rom);
1274
1275 return count;
1276 }
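/*
 * Illustrative shell use of the "rom" attribute backed by the two routines
 * above (the device path is an example):
 *
 *   # echo 1 > /sys/bus/pci/devices/0000:01:00.0/rom        enable reads
 *   # cat /sys/bus/pci/devices/0000:01:00.0/rom > vga.rom   copy the ROM image
 *   # echo 0 > /sys/bus/pci/devices/0000:01:00.0/rom        disable reads again
 */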
1277
1278 static struct bin_attribute pci_config_attr = {
1279 .attr = {
1280 .name = "config",
1281 .mode = S_IRUGO | S_IWUSR,
1282 },
1283 .size = PCI_CFG_SPACE_SIZE,
1284 .read = pci_read_config,
1285 .write = pci_write_config,
1286 };
1287
1288 static struct bin_attribute pcie_config_attr = {
1289 .attr = {
1290 .name = "config",
1291 .mode = S_IRUGO | S_IWUSR,
1292 },
1293 .size = PCI_CFG_SPACE_EXP_SIZE,
1294 .read = pci_read_config,
1295 .write = pci_write_config,
1296 };
1297
1298 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1299 const char *buf, size_t count)
1300 {
1301 struct pci_dev *pdev = to_pci_dev(dev);
1302 unsigned long val;
1303 ssize_t result = kstrtoul(buf, 0, &val);
1304
1305 if (result < 0)
1306 return result;
1307
1308 if (val != 1)
1309 return -EINVAL;
1310
1311 result = pci_reset_function(pdev);
1312 if (result < 0)
1313 return result;
1314
1315 return count;
1316 }
1317
1318 static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
1319
1320 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
1321 {
1322 int retval;
1323 struct bin_attribute *attr;
1324
1325 /* If the device has VPD, try to expose it in sysfs. */
1326 if (dev->vpd) {
1327 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
1328 if (!attr)
1329 return -ENOMEM;
1330
1331 sysfs_bin_attr_init(attr);
1332 attr->size = dev->vpd->len;
1333 attr->attr.name = "vpd";
1334 attr->attr.mode = S_IRUSR | S_IWUSR;
1335 attr->read = read_vpd_attr;
1336 attr->write = write_vpd_attr;
1337 retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
1338 if (retval) {
1339 kfree(attr);
1340 return retval;
1341 }
1342 dev->vpd->attr = attr;
1343 }
1344
1345 /* Active State Power Management */
1346 pcie_aspm_create_sysfs_dev_files(dev);
1347
1348 if (!pci_probe_reset_function(dev)) {
1349 retval = device_create_file(&dev->dev, &reset_attr);
1350 if (retval)
1351 goto error;
1352 dev->reset_fn = 1;
1353 }
1354 return 0;
1355
1356 error:
1357 pcie_aspm_remove_sysfs_dev_files(dev);
1358 if (dev->vpd && dev->vpd->attr) {
1359 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
1360 kfree(dev->vpd->attr);
1361 }
1362
1363 return retval;
1364 }
1365
1366 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1367 {
1368 int retval;
1369 int rom_size = 0;
1370 struct bin_attribute *attr;
1371
1372 if (!sysfs_initialized)
1373 return -EACCES;
1374
1375 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
1376 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
1377 else
1378 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
1379 if (retval)
1380 goto err;
1381
1382 retval = pci_create_resource_files(pdev);
1383 if (retval)
1384 goto err_config_file;
1385
1386 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
1387 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
1388 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
1389 rom_size = 0x20000;
1390
1391 /* If the device has a ROM, try to expose it in sysfs. */
1392 if (rom_size) {
1393 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
1394 if (!attr) {
1395 retval = -ENOMEM;
1396 goto err_resource_files;
1397 }
1398 sysfs_bin_attr_init(attr);
1399 attr->size = rom_size;
1400 attr->attr.name = "rom";
1401 attr->attr.mode = S_IRUSR | S_IWUSR;
1402 attr->read = pci_read_rom;
1403 attr->write = pci_write_rom;
1404 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
1405 if (retval) {
1406 kfree(attr);
1407 goto err_resource_files;
1408 }
1409 pdev->rom_attr = attr;
1410 }
1411
1412 /* add sysfs entries for various capabilities */
1413 retval = pci_create_capabilities_sysfs(pdev);
1414 if (retval)
1415 goto err_rom_file;
1416
1417 pci_create_firmware_label_files(pdev);
1418
1419 return 0;
1420
1421 err_rom_file:
1422 if (rom_size) {
1423 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
1424 kfree(pdev->rom_attr);
1425 pdev->rom_attr = NULL;
1426 }
1427 err_resource_files:
1428 pci_remove_resource_files(pdev);
1429 err_config_file:
1430 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
1431 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
1432 else
1433 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
1434 err:
1435 return retval;
1436 }
1437
1438 static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
1439 {
1440 if (dev->vpd && dev->vpd->attr) {
1441 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
1442 kfree(dev->vpd->attr);
1443 }
1444
1445 pcie_aspm_remove_sysfs_dev_files(dev);
1446 if (dev->reset_fn) {
1447 device_remove_file(&dev->dev, &reset_attr);
1448 dev->reset_fn = 0;
1449 }
1450 }
1451
1452 /**
1453 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1454 * @pdev: device whose entries we should free
1455 *
1456 * Cleanup when @pdev is removed from sysfs.
1457 */
1458 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1459 {
1460 int rom_size = 0;
1461
1462 if (!sysfs_initialized)
1463 return;
1464
1465 pci_remove_capabilities_sysfs(pdev);
1466
1467 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
1468 sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
1469 else
1470 sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
1471
1472 pci_remove_resource_files(pdev);
1473
1474 if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
1475 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
1476 else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
1477 rom_size = 0x20000;
1478
1479 if (rom_size && pdev->rom_attr) {
1480 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
1481 kfree(pdev->rom_attr);
1482 }
1483
1484 pci_remove_firmware_label_files(pdev);
1485
1486 }
1487
1488 static int __init pci_sysfs_init(void)
1489 {
1490 struct pci_dev *pdev = NULL;
1491 int retval;
1492
1493 sysfs_initialized = 1;
1494 for_each_pci_dev(pdev) {
1495 retval = pci_create_sysfs_dev_files(pdev);
1496 if (retval) {
1497 pci_dev_put(pdev);
1498 return retval;
1499 }
1500 }
1501
1502 return 0;
1503 }
1504 late_initcall(pci_sysfs_init);
1505
1506 static struct attribute *pci_dev_dev_attrs[] = {
1507 &vga_attr.attr,
1508 NULL,
1509 };
1510
1511 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1512 struct attribute *a, int n)
1513 {
1514 struct device *dev = container_of(kobj, struct device, kobj);
1515 struct pci_dev *pdev = to_pci_dev(dev);
1516
1517 if (a == &vga_attr.attr)
1518 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
1519 return 0;
1520
1521 return a->mode;
1522 }
1523
1524 static struct attribute *pci_dev_hp_attrs[] = {
1525 &dev_remove_attr.attr,
1526 &dev_rescan_attr.attr,
1527 NULL,
1528 };
1529
1530 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1531 struct attribute *a, int n)
1532 {
1533 struct device *dev = container_of(kobj, struct device, kobj);
1534 struct pci_dev *pdev = to_pci_dev(dev);
1535
1536 if (pdev->is_virtfn)
1537 return 0;
1538
1539 return a->mode;
1540 }
1541
1542 static struct attribute_group pci_dev_hp_attr_group = {
1543 .attrs = pci_dev_hp_attrs,
1544 .is_visible = pci_dev_hp_attrs_are_visible,
1545 };
1546
1547 #ifdef CONFIG_PCI_IOV
1548 static struct attribute *sriov_dev_attrs[] = {
1549 &sriov_totalvfs_attr.attr,
1550 &sriov_numvfs_attr.attr,
1551 NULL,
1552 };
1553
1554 static umode_t sriov_attrs_are_visible(struct kobject *kobj,
1555 struct attribute *a, int n)
1556 {
1557 struct device *dev = container_of(kobj, struct device, kobj);
1558
1559 if (!dev_is_pf(dev))
1560 return 0;
1561
1562 return a->mode;
1563 }
1564
1565 static struct attribute_group sriov_dev_attr_group = {
1566 .attrs = sriov_dev_attrs,
1567 .is_visible = sriov_attrs_are_visible,
1568 };
1569 #endif /* CONFIG_PCI_IOV */
1570
1571 static struct attribute_group pci_dev_attr_group = {
1572 .attrs = pci_dev_dev_attrs,
1573 .is_visible = pci_dev_attrs_are_visible,
1574 };
1575
1576 static const struct attribute_group *pci_dev_attr_groups[] = {
1577 &pci_dev_attr_group,
1578 &pci_dev_hp_attr_group,
1579 #ifdef CONFIG_PCI_IOV
1580 &sriov_dev_attr_group,
1581 #endif
1582 NULL,
1583 };
1584
1585 struct device_type pci_dev_type = {
1586 .groups = pci_dev_attr_groups,
1587 };