// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI address cache; allows the lookup of PCI devices based on I/O address
 *
 * Copyright IBM Corporation 2004
 * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/pci-bridge.h>
#include <asm/debugfs.h>
#include <asm/ppc-pci.h>

/**
 * The PCI address cache subsystem.  This subsystem places
 * PCI device address resources into a red-black tree, sorted
 * according to the address range, so that given only an i/o
 * address, the corresponding PCI device can be quickly
 * found.  It is safe to perform an address lookup in an interrupt
 * context; this ability is an important feature.
 *
 * Currently, the only customer of this code is the EEH subsystem;
 * thus, this code has been somewhat tailored to suit EEH better.
 * In particular, the cache does *not* hold the addresses of devices
 * for which EEH is not enabled.
 *
 * (Implementation Note: The RB tree seems to be better/faster
 * than any hash algo I could think of for this problem, even
 * with the penalty of slow pointer chases for d-cache misses).
 */
struct pci_io_addr_range {
	struct rb_node rb_node;
	resource_size_t addr_lo;
	resource_size_t addr_hi;
	struct eeh_dev *edev;
	struct pci_dev *pcidev;
	unsigned long flags;
};

static struct pci_io_addr_cache {
	struct rb_root rb_root;
	spinlock_t piar_lock;
} pci_io_addr_cache_root;

static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
	struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;

	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (addr < piar->addr_lo)
			n = n->rb_left;
		else if (addr > piar->addr_hi)
			n = n->rb_right;
		else
			return piar->edev;
	}

	return NULL;
}

/**
 * eeh_addr_cache_get_dev - Get device, given only address
 * @addr: mmio (PIO) phys address or i/o port number
 *
 * Given an mmio phys address, or a port number, find a pci device
 * that implements this address.  I/O port numbers are assumed to be offset
 * from zero (that is, they do *not* have pci_io_addr added in).
 * It is safe to call this function within an interrupt.
 */
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
	struct eeh_dev *edev;
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	edev = __eeh_addr_cache_get_device(addr);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
	return edev;
}
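
/*
 * Usage sketch (illustrative, not part of this file): the EEH core in
 * eeh.c typically translates a faulting MMIO token to a physical address
 * and then consults this cache to find the owning device, roughly:
 *
 *	addr = eeh_token_to_phys((unsigned long __force) token);
 *	edev = eeh_addr_cache_get_dev(addr);
 *	if (edev)
 *		eeh_dev_check_failure(edev);
 *
 * The helper names outside this file are shown for illustration only.
 */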

#ifdef DEBUG
/*
 * Handy-dandy debug print routine, does nothing more
 * than print out the contents of our addr cache.
 */
static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
	struct rb_node *n;
	int cnt = 0;

	n = rb_first(&cache->rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
		pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n",
			(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
			&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
		cnt++;
		n = rb_next(n);
	}
}
#endif

/* Insert address range into the rb tree. */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
		      resource_size_t ahi, unsigned long flags)
{
	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct pci_io_addr_range *piar;

	/* Walk tree, find a place to insert into tree */
	while (*p) {
		parent = *p;
		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
		if (ahi < piar->addr_lo) {
			p = &parent->rb_left;
		} else if (alo > piar->addr_hi) {
			p = &parent->rb_right;
		} else {
			if (dev != piar->pcidev ||
			    alo != piar->addr_lo || ahi != piar->addr_hi) {
				pr_warn("PIAR: overlapping address range\n");
			}
			return piar;
		}
	}
	piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
	if (!piar)
		return NULL;

	piar->addr_lo = alo;
	piar->addr_hi = ahi;
	piar->edev = pci_dev_to_eeh_dev(dev);
	piar->pcidev = dev;
	piar->flags = flags;

	pr_debug("PIAR: insert range=[%pap:%pap] dev=%s\n",
		 &alo, &ahi, pci_name(dev));

	rb_link_node(&piar->rb_node, parent, p);
	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

	return piar;
}
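
/*
 * Worked example (illustrative): with [0x1000-0x1fff] and [0x3000-0x3fff]
 * already cached (the first range at the root), inserting [0x2000-0x2fff]
 * walks right of the first node (alo 0x2000 > 0x1fff) and left of the
 * second (ahi 0x2fff < 0x3000), so it gets a fresh node.  Inserting
 * [0x1800-0x27ff] instead satisfies neither comparison at the root and,
 * since the device/bounds do not match, triggers the "overlapping address
 * range" warning and returns the existing node unchanged.
 */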

static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	struct pci_dn *pdn;
	struct eeh_dev *edev;
	int i;

	pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	if (!pdn) {
		pr_warn("PCI: no pci dn found for dev=%s\n",
			pci_name(dev));
		return;
	}

	edev = pdn_to_eeh_dev(pdn);
	if (!edev) {
		pr_warn("PCI: no EEH dev found for %s\n",
			pci_name(dev));
		return;
	}

	/* Skip any devices for which EEH is not enabled. */
	if (!edev->pe) {
		dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
		return;
	}

	/*
	 * Walk resources on this device, poke the first 7 (6 normal BARs and
	 * 1 ROM BAR) into the tree.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		resource_size_t start = pci_resource_start(dev, i);
		resource_size_t end = pci_resource_end(dev, i);
		unsigned long flags = pci_resource_flags(dev, i);

		/* We are interested only in bus addresses, not DMA or other stuff */
		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			continue;
		eeh_addr_cache_insert(dev, start, end, flags);
	}
}

/**
 * eeh_addr_cache_insert_dev - Add a device to the address cache
 * @dev: PCI device whose I/O addresses we are interested in.
 *
 * In order to support the fast lookup of devices based on addresses,
 * we maintain a cache of devices that can be quickly searched.
 * This routine adds a device to that cache.
 */
void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__eeh_addr_cache_insert_dev(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
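
/*
 * Example (illustrative, hypothetical values): for a device with one 4K MEM
 * BAR at 0x3fe080000000 and one 256-byte I/O BAR at 0x1000, the resource
 * walk above ends up caching two ranges, roughly:
 *
 *	mem [0x3fe080000000-0x3fe080000fff] -> that device
 *	i/o [0x1000-0x10ff]                 -> that device
 *
 * Unassigned BARs (start/end of 0 or all-ones) are skipped, so they never
 * shadow a valid range in the tree.
 */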

static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
	struct rb_node *n;

restart:
	n = rb_first(&pci_io_addr_cache_root.rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (piar->pcidev == dev) {
			pr_debug("PIAR: remove range=[%pap:%pap] dev=%s\n",
				 &piar->addr_lo, &piar->addr_hi, pci_name(dev));
			rb_erase(n, &pci_io_addr_cache_root.rb_root);
			kfree(piar);
			goto restart;
		}
		n = rb_next(n);
	}
}

/**
 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
 * @dev: device to remove
 *
 * Remove a device from the addr-cache tree.
 * This is potentially expensive, since it will walk
 * the tree multiple times (once per resource).
 * But so what; device removal doesn't need to be that fast.
 */
void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__eeh_addr_cache_rmv_dev(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}

/**
 * eeh_addr_cache_build - Build a cache of I/O addresses
 *
 * Build a cache of pci i/o addresses.  This cache will be used to
 * find the pci device that corresponds to a given address.
 * This routine scans all PCI buses to build the cache.
 * Must be run late in the boot process, after the pci controllers
 * have been scanned for devices (after all device resources are known).
 */
void eeh_addr_cache_build(void)
{
	struct pci_dn *pdn;
	struct eeh_dev *edev;
	struct pci_dev *dev = NULL;

	spin_lock_init(&pci_io_addr_cache_root.piar_lock);

	for_each_pci_dev(dev) {
		pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
		if (!pdn)
			continue;

		edev = pdn_to_eeh_dev(pdn);
		if (!edev)
			continue;

		dev->dev.archdata.edev = edev;
		edev->pdev = dev;

		eeh_addr_cache_insert_dev(dev);
		eeh_sysfs_add_device(dev);
	}
}

static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
	struct pci_io_addr_range *piar;
	struct rb_node *n;

	spin_lock(&pci_io_addr_cache_root.piar_lock);
	for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		seq_printf(s, "%s addr range [%pap-%pap]: %s\n",
			   (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
			   &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
	}
	spin_unlock(&pci_io_addr_cache_root.piar_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);

void eeh_cache_debugfs_init(void)
{
	debugfs_create_file_unsafe("eeh_address_cache", 0400,
				   powerpc_debugfs_root, NULL,
				   &eeh_addr_cache_fops);
}
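
/*
 * Example (illustrative): with debugfs mounted at /sys/kernel/debug, the
 * cache can be dumped with
 *
 *	# cat /sys/kernel/debug/powerpc/eeh_address_cache
 *	mem addr range [0x3fe080000000-0x3fe080000fff]: 0000:01:00.0
 *
 * following the format emitted by eeh_addr_cache_show() above; the exact
 * path and values depend on the platform and device configuration.
 */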