]> git.ipfire.org Git - thirdparty/u-boot.git/blame - drivers/pci/pci-uclass.c
Revert "Merge patch series "arm: dts: am62-beagleplay: Fix Beagleplay Ethernet""
[thirdparty/u-boot.git] / drivers / pci / pci-uclass.c
CommitLineData
83d290c5 1// SPDX-License-Identifier: GPL-2.0+
ff3e077b
SG
2/*
3 * Copyright (c) 2014 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
ff3e077b
SG
5 */
6
b953ec2b
PD
7#define LOG_CATEGORY UCLASS_PCI
8
d678a59d 9#include <common.h>
ff3e077b
SG
10#include <dm.h>
11#include <errno.h>
691d719d 12#include <init.h>
f7ae49fc 13#include <log.h>
336d4615 14#include <malloc.h>
ff3e077b 15#include <pci.h>
db3820a2 16#include <spl.h>
401d1c4f 17#include <asm/global_data.h>
21d1fe7e 18#include <asm/io.h>
ff3e077b 19#include <dm/device-internal.h>
bf501595 20#include <dm/lists.h>
42f3663a 21#include <dm/uclass-internal.h>
348b744b 22#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
07f2f58b 23#include <asm/fsp/fsp_support.h>
348b744b 24#endif
f5cbb5c7 25#include <dt-bindings/pci/pci.h>
c05ed00a 26#include <linux/delay.h>
1e94b46f 27#include <linux/printk.h>
5e23b8b4 28#include "pci_internal.h"
ff3e077b
SG
29
30DECLARE_GLOBAL_DATA_PTR;
31
a6eb93b3 32int pci_get_bus(int busnum, struct udevice **busp)
983c6ba2
SG
33{
34 int ret;
35
36 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
37
38 /* Since buses may not be numbered yet try a little harder with bus 0 */
39 if (ret == -ENODEV) {
3f603cbb 40 ret = uclass_first_device_err(UCLASS_PCI, busp);
983c6ba2
SG
41 if (ret)
42 return ret;
983c6ba2
SG
43 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
44 }
45
46 return ret;
47}
48
9f60fb0d
SG
49struct udevice *pci_get_controller(struct udevice *dev)
50{
51 while (device_is_on_pci_bus(dev))
52 dev = dev->parent;
53
54 return dev;
55}
56
/*
 * dm_pci_get_bdf() - Return the bus/device/function address of a device
 *
 * The bus number comes from the parent bus's sequence number; the devfn
 * comes from the child's parent-platform data set at bind time.
 */
pci_dev_t dm_pci_get_bdf(const struct udevice *dev)
{
	struct pci_child_plat *pplat = dev_get_parent_plat(dev);
	struct udevice *bus = dev->parent;

	/*
	 * This error indicates that @dev is a device on an unprobed PCI bus.
	 * The bus likely has seq == -1, so the PCI_ADD_BUS() macro below
	 * will produce a bad BDF.
	 *
	 * A common cause of this problem is that this function is called in the
	 * of_to_plat() method of @dev. Accessing the PCI bus in that
	 * method is not allowed, since it has not yet been probed. To fix this,
	 * move that access to the probe() method of @dev instead.
	 */
	if (!device_active(bus))
		log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name,
			bus->name);
	return PCI_ADD_BUS(dev_seq(bus), pplat->devfn);
}
77
ff3e077b
SG
78/**
79 * pci_get_bus_max() - returns the bus number of the last active bus
80 *
185f812c 81 * Return: last bus number, or -1 if no active buses
ff3e077b
SG
82 */
83static int pci_get_bus_max(void)
84{
85 struct udevice *bus;
86 struct uclass *uc;
87 int ret = -1;
88
89 ret = uclass_get(UCLASS_PCI, &uc);
90 uclass_foreach_dev(bus, uc) {
8b85dfc6
SG
91 if (dev_seq(bus) > ret)
92 ret = dev_seq(bus);
ff3e077b
SG
93 }
94
95 debug("%s: ret=%d\n", __func__, ret);
96
97 return ret;
98}
99
/* Convenience wrapper around pci_get_bus_max() */
int pci_last_busno(void)
{
	return pci_get_bus_max();
}
104
105int pci_get_ff(enum pci_size_t size)
106{
107 switch (size) {
108 case PCI_SIZE_8:
109 return 0xff;
110 case PCI_SIZE_16:
111 return 0xffff;
112 default:
113 return 0xffffffff;
114 }
115}
116
02e4d38d
MV
117static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
118 ofnode *rnode)
119{
120 struct fdt_pci_addr addr;
121 ofnode node;
122 int ret;
123
124 dev_for_each_subnode(node, bus) {
125 ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
f69d3d6d 126 &addr, NULL);
02e4d38d
MV
127 if (ret)
128 continue;
129
130 if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
131 continue;
132
133 *rnode = node;
134 break;
135 }
136};
137
c4e72c4a 138int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn,
ff3e077b
SG
139 struct udevice **devp)
140{
141 struct udevice *dev;
142
143 for (device_find_first_child(bus, &dev);
144 dev;
145 device_find_next_child(&dev)) {
8a8d24bd 146 struct pci_child_plat *pplat;
ff3e077b 147
caa4daa2 148 pplat = dev_get_parent_plat(dev);
ff3e077b
SG
149 if (pplat && pplat->devfn == find_devfn) {
150 *devp = dev;
151 return 0;
152 }
153 }
154
155 return -ENODEV;
156}
157
f3f1faef 158int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
ff3e077b
SG
159{
160 struct udevice *bus;
161 int ret;
162
983c6ba2 163 ret = pci_get_bus(PCI_BUS(bdf), &bus);
ff3e077b
SG
164 if (ret)
165 return ret;
166 return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
167}
168
169static int pci_device_matches_ids(struct udevice *dev,
e58f3a7d 170 const struct pci_device_id *ids)
ff3e077b 171{
8a8d24bd 172 struct pci_child_plat *pplat;
ff3e077b
SG
173 int i;
174
caa4daa2 175 pplat = dev_get_parent_plat(dev);
ff3e077b
SG
176 if (!pplat)
177 return -EINVAL;
178 for (i = 0; ids[i].vendor != 0; i++) {
179 if (pplat->vendor == ids[i].vendor &&
180 pplat->device == ids[i].device)
181 return i;
182 }
183
184 return -EINVAL;
185}
186
e58f3a7d 187int pci_bus_find_devices(struct udevice *bus, const struct pci_device_id *ids,
ff3e077b
SG
188 int *indexp, struct udevice **devp)
189{
190 struct udevice *dev;
191
192 /* Scan all devices on this bus */
193 for (device_find_first_child(bus, &dev);
194 dev;
195 device_find_next_child(&dev)) {
196 if (pci_device_matches_ids(dev, ids) >= 0) {
197 if ((*indexp)-- <= 0) {
198 *devp = dev;
199 return 0;
200 }
201 }
202 }
203
204 return -ENODEV;
205}
206
e58f3a7d 207int pci_find_device_id(const struct pci_device_id *ids, int index,
ff3e077b
SG
208 struct udevice **devp)
209{
210 struct udevice *bus;
211
212 /* Scan all known buses */
213 for (uclass_first_device(UCLASS_PCI, &bus);
214 bus;
215 uclass_next_device(&bus)) {
216 if (!pci_bus_find_devices(bus, ids, &index, devp))
217 return 0;
218 }
219 *devp = NULL;
220
221 return -ENODEV;
222}
223
5c0bf647
SG
224static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
225 unsigned int device, int *indexp,
226 struct udevice **devp)
227{
8a8d24bd 228 struct pci_child_plat *pplat;
5c0bf647
SG
229 struct udevice *dev;
230
231 for (device_find_first_child(bus, &dev);
232 dev;
233 device_find_next_child(&dev)) {
caa4daa2 234 pplat = dev_get_parent_plat(dev);
5c0bf647
SG
235 if (pplat->vendor == vendor && pplat->device == device) {
236 if (!(*indexp)--) {
237 *devp = dev;
238 return 0;
239 }
240 }
241 }
242
243 return -ENODEV;
244}
245
246int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
247 struct udevice **devp)
248{
249 struct udevice *bus;
250
251 /* Scan all known buses */
252 for (uclass_first_device(UCLASS_PCI, &bus);
253 bus;
254 uclass_next_device(&bus)) {
255 if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
256 return device_probe(*devp);
257 }
258 *devp = NULL;
259
260 return -ENODEV;
261}
262
a0eb8356
SG
263int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
264{
265 struct udevice *dev;
266
267 /* Scan all known buses */
268 for (pci_find_first_device(&dev);
269 dev;
270 pci_find_next_device(&dev)) {
8a8d24bd 271 struct pci_child_plat *pplat = dev_get_parent_plat(dev);
a0eb8356
SG
272
273 if (pplat->class == find_class && !index--) {
274 *devp = dev;
275 return device_probe(*devp);
276 }
277 }
278 *devp = NULL;
279
280 return -ENODEV;
281}
282
ff3e077b
SG
283int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
284 unsigned long value, enum pci_size_t size)
285{
286 struct dm_pci_ops *ops;
287
288 ops = pci_get_ops(bus);
289 if (!ops->write_config)
290 return -ENOSYS;
d9f554b6
T
291 if (offset < 0 || offset >= 4096)
292 return -EINVAL;
ff3e077b
SG
293 return ops->write_config(bus, bdf, offset, value, size);
294}
295
319dba1f
SG
296int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
297 u32 clr, u32 set)
298{
299 ulong val;
300 int ret;
301
302 ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
303 if (ret)
304 return ret;
305 val &= ~clr;
306 val |= set;
307
308 return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
309}
310
f98aa78e
VO
311static int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
312 enum pci_size_t size)
ff3e077b
SG
313{
314 struct udevice *bus;
315 int ret;
316
983c6ba2 317 ret = pci_get_bus(PCI_BUS(bdf), &bus);
ff3e077b
SG
318 if (ret)
319 return ret;
320
4d8615cb 321 return pci_bus_write_config(bus, bdf, offset, value, size);
ff3e077b
SG
322}
323
66afb4ed
SG
324int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
325 enum pci_size_t size)
326{
327 struct udevice *bus;
328
1e0f2263 329 for (bus = dev; device_is_on_pci_bus(bus);)
66afb4ed 330 bus = bus->parent;
21ccce1b
SG
331 return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
332 size);
66afb4ed
SG
333}
334
ff3e077b
SG
/* Write a 32-bit value to the config space of the device at @bdf */
int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

/* Write a 16-bit value to the config space of the device at @bdf */
int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

/* Write an 8-bit value to the config space of the device at @bdf */
int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}
349
66afb4ed
SG
/* Write an 8-bit value to @dev's config space */
int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

/* Write a 16-bit value to @dev's config space */
int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

/* Write a 32-bit value to @dev's config space */
int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
364
/*
 * pci_bus_read_config() - Read a value from a device's config space
 *
 * Delegates to the bus's read_config op. On failure the caller still gets
 * a defined value: all-ones if the op is missing (mimicking an aborted
 * access), zero if the offset is out of range.
 */
int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config) {
		/* No read op: return all-ones like a master abort would */
		*valuep = pci_conv_32_to_size(~0, offset, size);
		return -ENOSYS;
	}
	if (offset < 0 || offset >= 4096) {
		/* Out-of-range offset (config space is at most 4KiB) */
		*valuep = pci_conv_32_to_size(0, offset, size);
		return -EINVAL;
	}
	return ops->read_config(bus, bdf, offset, valuep, size);
}
381
1512ac17
VO
382static int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
383 enum pci_size_t size)
ff3e077b
SG
384{
385 struct udevice *bus;
386 int ret;
387
983c6ba2 388 ret = pci_get_bus(PCI_BUS(bdf), &bus);
ff3e077b
SG
389 if (ret)
390 return ret;
391
4d8615cb 392 return pci_bus_read_config(bus, bdf, offset, valuep, size);
ff3e077b
SG
393}
394
194fca91
SG
395int dm_pci_read_config(const struct udevice *dev, int offset,
396 unsigned long *valuep, enum pci_size_t size)
66afb4ed 397{
194fca91 398 const struct udevice *bus;
66afb4ed 399
1e0f2263 400 for (bus = dev; device_is_on_pci_bus(bus);)
66afb4ed 401 bus = bus->parent;
21ccce1b 402 return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
66afb4ed
SG
403 size);
404}
405
ff3e077b
SG
406int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
407{
408 unsigned long value;
409 int ret;
410
411 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
412 if (ret)
413 return ret;
414 *valuep = value;
415
416 return 0;
417}
418
419int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
420{
421 unsigned long value;
422 int ret;
423
424 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
425 if (ret)
426 return ret;
427 *valuep = value;
428
429 return 0;
430}
431
432int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
433{
434 unsigned long value;
435 int ret;
436
437 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
438 if (ret)
439 return ret;
440 *valuep = value;
441
442 return 0;
443}
444
194fca91 445int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep)
66afb4ed
SG
446{
447 unsigned long value;
448 int ret;
449
450 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
451 if (ret)
452 return ret;
453 *valuep = value;
454
455 return 0;
456}
457
194fca91 458int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep)
66afb4ed
SG
459{
460 unsigned long value;
461 int ret;
462
463 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
464 if (ret)
465 return ret;
466 *valuep = value;
467
468 return 0;
469}
470
194fca91 471int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep)
66afb4ed
SG
472{
473 unsigned long value;
474 int ret;
475
476 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
477 if (ret)
478 return ret;
479 *valuep = value;
480
481 return 0;
482}
483
319dba1f
SG
484int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
485{
486 u8 val;
487 int ret;
488
489 ret = dm_pci_read_config8(dev, offset, &val);
490 if (ret)
491 return ret;
492 val &= ~clr;
493 val |= set;
494
495 return dm_pci_write_config8(dev, offset, val);
496}
497
498int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
499{
500 u16 val;
501 int ret;
502
503 ret = dm_pci_read_config16(dev, offset, &val);
504 if (ret)
505 return ret;
506 val &= ~clr;
507 val |= set;
508
509 return dm_pci_write_config16(dev, offset, val);
510}
511
512int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
513{
514 u32 val;
515 int ret;
516
517 ret = dm_pci_read_config32(dev, offset, &val);
518 if (ret)
519 return ret;
520 val &= ~clr;
521 val |= set;
522
523 return dm_pci_write_config32(dev, offset, val);
524}
525
bbbcb526
BM
/*
 * set_vga_bridge_bits() - Enable VGA forwarding on all parent bridges
 *
 * Walks from @dev's parent up to (but not including) the root bus,
 * setting PCI_BRIDGE_CTL_VGA in each bridge's control register.
 * Config-access errors are ignored here (best effort).
 */
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (dev_seq(parent) != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}
538
ff3e077b
SG
/*
 * pci_auto_config_devices() - Autoconfigure all devices on a bus
 *
 * Runs PCI auto-configuration for every child of @bus, tracking the
 * highest subordinate bus number seen. Children with the DT property
 * "pci,no-autoconfig" are skipped. Devices deeper than a direct child
 * that are VGA-class get their parent bridges' VGA-forwarding bits set.
 *
 * Return: highest subordinate bus number (via log_msg_ret), -ve on error
 */
int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	struct pci_child_plat *pplat;
	unsigned int sub_bus;
	struct udevice *dev;

	sub_bus = dev_seq(bus);
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		/* Honour the opt-out property where a DT node exists */
		if (dev_has_ofnode(dev) &&
		    dev_read_bool(dev, "pci,no-autoconfig"))
			continue;
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return log_msg_ret("auto", ret);
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		/* Direct children need no bridge fix-up; skip the VGA check */
		if (dev_get_parent(dev) == bus)
			continue;

		pplat = dev_get_parent_plat(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	/* Remember the deepest bus number reached during configuration */
	if (hose->last_busno < sub_bus)
		hose->last_busno = sub_bus;
	debug("%s: done\n", __func__);

	return log_msg_ret("sub", sub_bus);
}
578
badb9922 579int pci_generic_mmap_write_config(
c4e72c4a
SG
580 const struct udevice *bus,
581 int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
582 void **addrp),
badb9922
TT
583 pci_dev_t bdf,
584 uint offset,
585 ulong value,
586 enum pci_size_t size)
587{
588 void *address;
589
590 if (addr_f(bus, bdf, offset, &address) < 0)
591 return 0;
592
593 switch (size) {
594 case PCI_SIZE_8:
595 writeb(value, address);
596 return 0;
597 case PCI_SIZE_16:
598 writew(value, address);
599 return 0;
600 case PCI_SIZE_32:
601 writel(value, address);
602 return 0;
603 default:
604 return -EINVAL;
605 }
606}
607
608int pci_generic_mmap_read_config(
c4e72c4a
SG
609 const struct udevice *bus,
610 int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
611 void **addrp),
badb9922
TT
612 pci_dev_t bdf,
613 uint offset,
614 ulong *valuep,
615 enum pci_size_t size)
616{
617 void *address;
618
619 if (addr_f(bus, bdf, offset, &address) < 0) {
620 *valuep = pci_get_ff(size);
621 return 0;
622 }
623
624 switch (size) {
625 case PCI_SIZE_8:
626 *valuep = readb(address);
627 return 0;
628 case PCI_SIZE_16:
629 *valuep = readw(address);
630 return 0;
631 case PCI_SIZE_32:
632 *valuep = readl(address);
633 return 0;
634 default:
635 return -EINVAL;
636 }
637}
638
/*
 * dm_pci_hose_probe_bus() - Probe a bridge and number its secondary bus
 *
 * Only bridge-type headers are accepted. The subordinate bus number is
 * taken from the Enhanced Allocation capability when present (fixed bus
 * numbering), otherwise allocated as one past the current maximum and
 * corrected after the probe.
 *
 * Return: the subordinate bus number on success, -ve on error
 */
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	u8 header_type;
	int sub_bus;
	int ret;
	int ea_pos;
	u8 reg;

	debug("%s\n", __func__);

	/* Only a Type 1 (bridge) header has a bus behind it */
	dm_pci_read_config8(bus, PCI_HEADER_TYPE, &header_type);
	header_type &= 0x7f;
	if (header_type != PCI_HEADER_TYPE_BRIDGE) {
		debug("%s: Skipping PCI device %d with Non-Bridge Header Type 0x%x\n",
		      __func__, PCI_DEV(dm_pci_get_bdf(bus)), header_type);
		return log_msg_ret("probe", -EINVAL);
	}

	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
		ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
	else
		ea_pos = 0;

	if (ea_pos) {
		/* EA: the fixed subordinate bus number lives in the cap */
		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
				    &reg);
		sub_bus = reg;
	} else {
		sub_bus = pci_get_bus_max() + 1;
	}
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return log_msg_ret("probe", ret);
	}

	/* Without EA, the probe may have discovered deeper buses */
	if (!ea_pos)
		sub_bus = pci_get_bus_max();

	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
686
aba92962
SG
687/**
688 * pci_match_one_device - Tell if a PCI device structure has a matching
689 * PCI device id structure
690 * @id: single PCI device id structure to match
0367bd4d 691 * @find: the PCI device id structure to match against
aba92962 692 *
0367bd4d
HZ
693 * Returns true if the finding pci_device_id structure matched or false if
694 * there is no match.
aba92962
SG
695 */
696static bool pci_match_one_id(const struct pci_device_id *id,
697 const struct pci_device_id *find)
698{
699 if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
700 (id->device == PCI_ANY_ID || id->device == find->device) &&
701 (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
702 (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
703 !((id->class ^ find->class) & id->class_mask))
704 return true;
705
706 return false;
707}
708
f5cbb5c7
SG
709/**
710 * pci_need_device_pre_reloc() - Check if a device should be bound
711 *
712 * This checks a list of vendor/device-ID values indicating devices that should
713 * be bound before relocation.
714 *
715 * @bus: Bus to check
716 * @vendor: Vendor ID to check
717 * @device: Device ID to check
185f812c 718 * Return: true if the vendor/device is in the list, false if not
f5cbb5c7
SG
719 */
720static bool pci_need_device_pre_reloc(struct udevice *bus, uint vendor,
721 uint device)
722{
723 u32 vendev;
724 int index;
725
db3820a2
SG
726 if (spl_phase() == PHASE_SPL && CONFIG_IS_ENABLED(PCI_PNP))
727 return true;
728
f5cbb5c7
SG
729 for (index = 0;
730 !dev_read_u32_index(bus, "u-boot,pci-pre-reloc", index,
731 &vendev);
732 index++) {
733 if (vendev == PCI_VENDEV(vendor, device))
734 return true;
735 }
736
737 return false;
738}
739
aba92962
SG
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function address - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * Return: 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	ofnode node = ofnode_null();
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);

	/* Determine optional OF node */
	if (ofnode_valid(dev_ofnode(parent)))
		pci_dev_find_ofnode(parent, bdf, &node);

	/* A DT node with status = "disabled" suppresses the device */
	if (ofnode_valid(node) && !ofnode_is_enabled(node)) {
		debug("%s: Ignoring disabled device\n", __func__);
		return log_msg_ret("dis", -EPERM);
	}

	/* Search the linker-generated list of U_BOOT_PCI_DEVICE entries */
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC set, to save
			 * precious memory space as on some platforms as that
			 * space is pretty limited (ie: using Cache As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC) &&
			    (!CONFIG_IS_ENABLED(PCI_PNP) ||
			     spl_phase() != PHASE_SPL))
				return log_msg_ret("pre", -EPERM);

			/*
			 * We could pass the descriptor to the driver as
			 * plat (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necesssary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, node,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices to save
	 * precious memory space as on some platforms as that space is pretty
	 * limited (ie: using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge &&
	    !pci_need_device_pre_reloc(parent, find_id->vendor,
				       find_id->device))
		return log_msg_ret("notbr", -EPERM);

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf),
		PCI_FUNC(bdf));
	/* device_bind() keeps the name pointer, so it must outlive us */
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver_to_node(parent, drv, str, node, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		free(str);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}
857
cecd013f
TH
/*
 * Weak default of the board hook invoked after each PCI device is bound;
 * boards override this to apply device-specific fixups.
 */
__weak extern void board_pci_fixup_dev(struct udevice *bus, struct udevice *dev)
{
}
861
ff3e077b
SG
/*
 * pci_bind_bus_devices() - Scan a bus's config space and bind children
 *
 * Probes every devfn on @bus. Devices with a matching device-tree node
 * are used as-is; others get a driver bound via
 * pci_find_and_bind_driver(). Devices refused pre-relocation (-EPERM)
 * are skipped silently. With CONFIG_PCI_ARID, the ARI capability's
 * "next function" field can make the scan jump ahead.
 */
int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ari_off;
	int ret;

	found_multi = false;
	end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_plat *pplat;
		struct udevice *dev;
		ulong class;

		/* New device slot: forget the previous multi-function flag */
		if (!PCI_FUNC(bdf))
			found_multi = false;
		/* Skip functions 1-7 of single-function devices */
		if (PCI_FUNC(bdf) && !found_multi)
			continue;

		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
					  PCI_SIZE_16);
		if (ret || vendor == 0xffff || vendor == 0x0000)
			continue;

		pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
				    &header_type, PCI_SIZE_8);

		/* Bit 7 of the header type marks a multi-function device */
		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
		debug(": find ret=%d\n", ret);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		} else {
			debug("device: %s\n", dev->name);
		}
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;

		if (IS_ENABLED(CONFIG_PCI_ARID)) {
			ari_off = dm_pci_find_ext_capability(dev,
							     PCI_EXT_CAP_ID_ARI);
			if (ari_off) {
				u16 ari_cap;

				/*
				 * Read Next Function number in ARI Cap
				 * Register
				 */
				dm_pci_read_config16(dev, ari_off + 4,
						     &ari_cap);
				/*
				 * Update next scan on this function number,
				 * subtract 1 in BDF to satisfy loop increment.
				 */
				if (ari_cap & 0xff00) {
					bdf = PCI_BDF(PCI_BUS(bdf),
						      PCI_DEV(ari_cap),
						      PCI_FUNC(ari_cap));
					bdf = bdf - 0x100;
				}
			}
		}

		/* Give the board a chance to tweak the freshly bound device */
		board_pci_fixup_dev(bus, dev);
	}

	return 0;
}
972
f2ebaaa9 973static int decode_regions(struct pci_controller *hose, ofnode parent_node,
f2825f6e 974 ofnode node)
ff3e077b
SG
975{
976 int pci_addr_cells, addr_cells, size_cells;
977 int cells_per_record;
dfaf6a57 978 struct bd_info *bd;
ff3e077b 979 const u32 *prop;
e0024741 980 int max_regions;
ff3e077b
SG
981 int len;
982 int i;
983
dd0f7bcf
SG
984 /* handle booting from coreboot, etc. */
985 if (!ll_boot_init())
986 return 0;
987
61e51bab 988 prop = ofnode_get_property(node, "ranges", &len);
f2825f6e
CG
989 if (!prop) {
990 debug("%s: Cannot decode regions\n", __func__);
f2ebaaa9 991 return -EINVAL;
f2825f6e
CG
992 }
993
878d68c0
SG
994 pci_addr_cells = ofnode_read_simple_addr_cells(node);
995 addr_cells = ofnode_read_simple_addr_cells(parent_node);
996 size_cells = ofnode_read_simple_size_cells(node);
ff3e077b
SG
997
998 /* PCI addresses are always 3-cells */
999 len /= sizeof(u32);
1000 cells_per_record = pci_addr_cells + addr_cells + size_cells;
1001 hose->region_count = 0;
1002 debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
1003 cells_per_record);
e0024741
SR
1004
1005 /* Dynamically allocate the regions array */
1006 max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
1007 hose->regions = (struct pci_region *)
1008 calloc(1, max_regions * sizeof(struct pci_region));
f2ebaaa9
PCT
1009 if (!hose->regions)
1010 return -ENOMEM;
e0024741
SR
1011
1012 for (i = 0; i < max_regions; i++, len -= cells_per_record) {
ff3e077b
SG
1013 u64 pci_addr, addr, size;
1014 int space_code;
1015 u32 flags;
1016 int type;
9526d83a 1017 int pos;
ff3e077b
SG
1018
1019 if (len < cells_per_record)
1020 break;
1021 flags = fdt32_to_cpu(prop[0]);
1022 space_code = (flags >> 24) & 3;
1023 pci_addr = fdtdec_get_number(prop + 1, 2);
1024 prop += pci_addr_cells;
1025 addr = fdtdec_get_number(prop, addr_cells);
1026 prop += addr_cells;
1027 size = fdtdec_get_number(prop, size_cells);
1028 prop += size_cells;
dee37fc9
MY
1029 debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
1030 __func__, hose->region_count, pci_addr, addr, size, space_code);
ff3e077b
SG
1031 if (space_code & 2) {
1032 type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
1033 PCI_REGION_MEM;
1034 } else if (space_code & 1) {
1035 type = PCI_REGION_IO;
1036 } else {
1037 continue;
1038 }
52ba9073
TT
1039
1040 if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
1041 type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
ec8eba8c
AS
1042 debug(" - pci_addr beyond the 32-bit boundary, ignoring\n");
1043 continue;
1044 }
1045
1046 if (!IS_ENABLED(CONFIG_PHYS_64BIT) && upper_32_bits(addr)) {
1047 debug(" - addr beyond the 32-bit boundary, ignoring\n");
1048 continue;
1049 }
1050
1051 if (~((pci_addr_t)0) - pci_addr < size) {
1052 debug(" - PCI range exceeds max address, ignoring\n");
1053 continue;
1054 }
1055
1056 if (~((phys_addr_t)0) - addr < size) {
1057 debug(" - phys range exceeds max address, ignoring\n");
52ba9073
TT
1058 continue;
1059 }
1060
9526d83a 1061 pos = -1;
4cf56ec0
SG
1062 if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
1063 for (i = 0; i < hose->region_count; i++) {
1064 if (hose->regions[i].flags == type)
1065 pos = i;
1066 }
9526d83a 1067 }
4cf56ec0 1068
9526d83a
SG
1069 if (pos == -1)
1070 pos = hose->region_count++;
1071 debug(" - type=%d, pos=%d\n", type, pos);
1072 pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
ff3e077b
SG
1073 }
1074
1075 /* Add a region for our local memory */
dfaf6a57 1076 bd = gd->bd;
1eaf7800 1077 if (!bd)
f2ebaaa9 1078 return 0;
1eaf7800 1079
664758c3
BM
1080 for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
1081 if (bd->bi_dram[i].size) {
a45343a0
DS
1082 phys_addr_t start = bd->bi_dram[i].start;
1083
1084 if (IS_ENABLED(CONFIG_PCI_MAP_SYSTEM_MEMORY))
1085 start = virt_to_phys((void *)(uintptr_t)bd->bi_dram[i].start);
1086
664758c3 1087 pci_set_region(hose->regions + hose->region_count++,
a45343a0 1088 start, start, bd->bi_dram[i].size,
664758c3
BM
1089 PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
1090 }
1091 }
ff3e077b 1092
f2ebaaa9 1093 return 0;
ff3e077b
SG
1094}
1095
/*
 * pci_uclass_pre_probe() - Prepare a PCI bus before it is probed
 *
 * Assigns the bus sequence number (if not already set), decodes the
 * controller's "ranges" for root buses, links bridges to their parent
 * controller, and initialises the hose bus-number bookkeeping.
 */
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	struct uclass *uc;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name,
	      bus->parent->name);
	hose = dev_get_uclass_priv(bus);

	/*
	 * Set the sequence number, if device_bind() doesn't. We want control
	 * of this so that numbers are allocated as devices are probed. That
	 * ensures that sub-bus numbering is correct (sub-buses must get
	 * numbers higher than their parents)
	 */
	if (dev_seq(bus) == -1) {
		ret = uclass_get(UCLASS_PCI, &uc);
		if (ret)
			return ret;
		bus->seq_ = uclass_find_next_free_seq(uc);
	}

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		ret = decode_regions(hose, dev_ofnode(bus->parent),
				     dev_ofnode(bus));
		if (ret)
			return ret;
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}

	hose->bus = bus;
	hose->first_busno = dev_seq(bus);
	hose->last_busno = dev_seq(bus);
	if (dev_has_ofnode(bus)) {
		hose->skip_auto_config_until_reloc =
			dev_read_bool(bus,
				      "u-boot,skip-auto-config-until-reloc");
	}

	return 0;
}
1144
/*
 * pci_uclass_post_probe() - finish probing a PCI bus
 *
 * Binds drivers for all devices found on the bus and (when PCI_PNP is
 * enabled) auto-configures them, honouring the per-controller option to
 * defer configuration until after relocation.
 *
 * @bus: PCI bus that has just been probed
 * Return: 0 on success, -ve error code on failure
 */
static int pci_uclass_post_probe(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int ret;

	debug("%s: probing bus %d\n", __func__, dev_seq(bus));
	/* Enumerate the bus and bind a driver to each device found */
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return log_msg_ret("bind", ret);

	/* Auto-configure now unless the controller deferred it to post-reloc */
	if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() &&
	    (!hose->skip_auto_config_until_reloc ||
	     (gd->flags & GD_FLG_RELOC))) {
		ret = pci_auto_config_devices(bus);
		if (ret < 0)
			return log_msg_ret("cfg", ret);
	}

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2)
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) {
		ret = fsp_init_phase_pci();
		if (ret)
			return log_msg_ret("fsp", ret);
	}
#endif

	return 0;
}
1186
1187static int pci_uclass_child_post_bind(struct udevice *dev)
1188{
8a8d24bd 1189 struct pci_child_plat *pplat;
ff3e077b 1190
7d14ee44 1191 if (!dev_has_ofnode(dev))
ff3e077b
SG
1192 return 0;
1193
caa4daa2 1194 pplat = dev_get_parent_plat(dev);
1f6b08b9
BM
1195
1196 /* Extract vendor id and device id if available */
1197 ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);
1198
1199 /* Extract the devfn from fdt_pci_addr */
b5214200 1200 pplat->devfn = pci_get_devfn(dev);
ff3e077b
SG
1201
1202 return 0;
1203}
1204
c4e72c4a 1205static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf,
4d8615cb
BM
1206 uint offset, ulong *valuep,
1207 enum pci_size_t size)
ff3e077b 1208{
0fd3d911 1209 struct pci_controller *hose = dev_get_uclass_priv(bus);
ff3e077b
SG
1210
1211 return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
1212}
1213
4d8615cb
BM
1214static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
1215 uint offset, ulong value,
1216 enum pci_size_t size)
ff3e077b 1217{
0fd3d911 1218 struct pci_controller *hose = dev_get_uclass_priv(bus);
ff3e077b
SG
1219
1220 return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
1221}
1222
76c3fbcd
SG
1223static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
1224{
1225 struct udevice *dev;
76c3fbcd
SG
1226
1227 /*
1228 * Scan through all the PCI controllers. On x86 there will only be one
1229 * but that is not necessarily true on other hardware.
1230 */
5afe93a1 1231 while (bus) {
76c3fbcd
SG
1232 device_find_first_child(bus, &dev);
1233 if (dev) {
1234 *devp = dev;
1235 return 0;
1236 }
4954937d 1237 uclass_next_device(&bus);
5afe93a1 1238 }
76c3fbcd
SG
1239
1240 return 0;
1241}
1242
/*
 * pci_find_next_device() - advance a PCI device iteration
 *
 * Given the device from a previous call in *devp, find the next device:
 * first among its siblings on the same bus, then on subsequent buses.
 * *devp is set to NULL when the iteration is exhausted.
 *
 * @devp: on entry the current device; on exit the next one or NULL
 * Return: 0 (always)
 */
int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	uclass_next_device(&bus);

	return bus ? skip_to_next_device(bus, devp) : 0;
}
1263
/*
 * pci_find_first_device() - start a PCI device iteration
 *
 * Finds the first PCI device on the first bus that has one; use
 * pci_find_next_device() to continue the walk.
 *
 * @devp: returns the first device, or NULL if there is none
 * Return: 0 (always)
 */
int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;

	*devp = NULL;
	/* Probe the first controller; bus is NULL if none exists */
	uclass_first_device(UCLASS_PCI, &bus);

	return skip_to_next_device(bus, devp);
}
1273
9289db6c
SG
1274ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
1275{
1276 switch (size) {
1277 case PCI_SIZE_8:
1278 return (value >> ((offset & 3) * 8)) & 0xff;
1279 case PCI_SIZE_16:
1280 return (value >> ((offset & 2) * 8)) & 0xffff;
1281 default:
1282 return value;
1283 }
1284}
1285
1286ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
1287 enum pci_size_t size)
1288{
1289 uint off_mask;
1290 uint val_mask, shift;
1291 ulong ldata, mask;
1292
1293 switch (size) {
1294 case PCI_SIZE_8:
1295 off_mask = 3;
1296 val_mask = 0xff;
1297 break;
1298 case PCI_SIZE_16:
1299 off_mask = 2;
1300 val_mask = 0xffff;
1301 break;
1302 default:
1303 return value;
1304 }
1305 shift = (offset & off_mask) * 8;
1306 ldata = (value & val_mask) << shift;
1307 mask = val_mask << shift;
1308 value = (old & ~mask) | ldata;
1309
1310 return value;
1311}
1312
143eb5b1
RK
1313int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index)
1314{
1315 int pci_addr_cells, addr_cells, size_cells;
1316 int cells_per_record;
1317 const u32 *prop;
1318 int len;
1319 int i = 0;
1320
1321 prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len);
1322 if (!prop) {
1323 log_err("PCI: Device '%s': Cannot decode dma-ranges\n",
1324 dev->name);
1325 return -EINVAL;
1326 }
1327
1328 pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev));
1329 addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent));
1330 size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev));
1331
1332 /* PCI addresses are always 3-cells */
1333 len /= sizeof(u32);
1334 cells_per_record = pci_addr_cells + addr_cells + size_cells;
1335 debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
1336 cells_per_record);
1337
1338 while (len) {
1339 memp->bus_start = fdtdec_get_number(prop + 1, 2);
1340 prop += pci_addr_cells;
1341 memp->phys_start = fdtdec_get_number(prop, addr_cells);
1342 prop += addr_cells;
1343 memp->size = fdtdec_get_number(prop, size_cells);
1344 prop += size_cells;
1345
1346 if (i == index)
1347 return 0;
1348 i++;
1349 len -= cells_per_record;
1350 }
1351
1352 return -EINVAL;
1353}
1354
f9260336
SG
1355int pci_get_regions(struct udevice *dev, struct pci_region **iop,
1356 struct pci_region **memp, struct pci_region **prefp)
1357{
1358 struct udevice *bus = pci_get_controller(dev);
1359 struct pci_controller *hose = dev_get_uclass_priv(bus);
1360 int i;
1361
1362 *iop = NULL;
1363 *memp = NULL;
1364 *prefp = NULL;
1365 for (i = 0; i < hose->region_count; i++) {
1366 switch (hose->regions[i].flags) {
1367 case PCI_REGION_IO:
1368 if (!*iop || (*iop)->size < hose->regions[i].size)
1369 *iop = hose->regions + i;
1370 break;
1371 case PCI_REGION_MEM:
1372 if (!*memp || (*memp)->size < hose->regions[i].size)
1373 *memp = hose->regions + i;
1374 break;
1375 case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
1376 if (!*prefp || (*prefp)->size < hose->regions[i].size)
1377 *prefp = hose->regions + i;
1378 break;
1379 }
1380 }
1381
1382 return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
1383}
1384
194fca91 1385u32 dm_pci_read_bar32(const struct udevice *dev, int barnum)
bab17cf1
SG
1386{
1387 u32 addr;
1388 int bar;
1389
1390 bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1391 dm_pci_read_config32(dev, bar, &addr);
9ece4b09
SG
1392
1393 /*
1394 * If we get an invalid address, return this so that comparisons with
1395 * FDT_ADDR_T_NONE work correctly
1396 */
1397 if (addr == 0xffffffff)
1398 return addr;
1399 else if (addr & PCI_BASE_ADDRESS_SPACE_IO)
bab17cf1
SG
1400 return addr & PCI_BASE_ADDRESS_IO_MASK;
1401 else
1402 return addr & PCI_BASE_ADDRESS_MEM_MASK;
1403}
1404
9d731c82
SG
1405void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
1406{
1407 int bar;
1408
1409 bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1410 dm_pci_write_config32(dev, bar, addr);
1411}
1412
7739d93d
AS
1413phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
1414 size_t len, unsigned long mask,
1415 unsigned long flags)
21d1fe7e 1416{
7739d93d
AS
1417 struct udevice *ctlr;
1418 struct pci_controller *hose;
21d1fe7e 1419 struct pci_region *res;
398dc367 1420 pci_addr_t offset;
21d1fe7e
SG
1421 int i;
1422
7739d93d
AS
1423 /* The root controller has the region information */
1424 ctlr = pci_get_controller(dev);
1425 hose = dev_get_uclass_priv(ctlr);
1426
1427 if (hose->region_count == 0)
1428 return bus_addr;
6f95d89c 1429
21d1fe7e
SG
1430 for (i = 0; i < hose->region_count; i++) {
1431 res = &hose->regions[i];
1432
7739d93d 1433 if ((res->flags & mask) != flags)
21d1fe7e
SG
1434 continue;
1435
398dc367
AS
1436 if (bus_addr < res->bus_start)
1437 continue;
1438
1439 offset = bus_addr - res->bus_start;
1440 if (offset >= res->size)
1441 continue;
1442
1443 if (len > res->size - offset)
1444 continue;
1445
7739d93d 1446 return res->phys_start + offset;
21d1fe7e
SG
1447 }
1448
70fdcc70 1449 puts("dm_pci_bus_to_phys: invalid physical address\n");
7739d93d 1450 return 0;
21d1fe7e
SG
1451}
1452
7739d93d
AS
1453pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
1454 size_t len, unsigned long mask,
1455 unsigned long flags)
21d1fe7e 1456{
21d1fe7e 1457 struct udevice *ctlr;
7739d93d 1458 struct pci_controller *hose;
21d1fe7e 1459 struct pci_region *res;
398dc367 1460 phys_addr_t offset;
21d1fe7e 1461 int i;
21d1fe7e
SG
1462
1463 /* The root controller has the region information */
1464 ctlr = pci_get_controller(dev);
1465 hose = dev_get_uclass_priv(ctlr);
1466
7739d93d
AS
1467 if (hose->region_count == 0)
1468 return phys_addr;
6f95d89c 1469
21d1fe7e
SG
1470 for (i = 0; i < hose->region_count; i++) {
1471 res = &hose->regions[i];
1472
7739d93d 1473 if ((res->flags & mask) != flags)
21d1fe7e
SG
1474 continue;
1475
398dc367
AS
1476 if (phys_addr < res->phys_start)
1477 continue;
21d1fe7e 1478
398dc367
AS
1479 offset = phys_addr - res->phys_start;
1480 if (offset >= res->size)
1481 continue;
1482
1483 if (len > res->size - offset)
1484 continue;
1485
7739d93d 1486 return res->bus_start + offset;
21d1fe7e
SG
1487 }
1488
70fdcc70 1489 puts("dm_pci_phys_to_bus: invalid physical address\n");
7739d93d 1490 return 0;
21d1fe7e
SG
1491}
1492
51eeae91 1493static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
8a8d24bd 1494 struct pci_child_plat *pdata)
51eeae91
SG
1495{
1496 phys_addr_t addr = 0;
1497
1498 /*
1499 * In the case of a Virtual Function device using BAR
1500 * base and size, add offset for VFn BAR(1, 2, 3...n)
1501 */
1502 if (pdata->is_virtfn) {
1503 size_t sz;
1504 u32 ea_entry;
1505
1506 /* MaxOffset, 1st DW */
1507 dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
1508 sz = ea_entry & PCI_EA_FIELD_MASK;
1509 /* Fill up lower 2 bits */
1510 sz |= (~PCI_EA_FIELD_MASK);
1511
1512 if (ea_entry & PCI_EA_IS_64) {
1513 /* MaxOffset 2nd DW */
1514 dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
1515 sz |= ((u64)ea_entry) << 32;
1516 }
1517
1518 addr = (pdata->virtid - 1) * (sz + 1);
1519 }
1520
1521 return addr;
1522}
1523
12507a2d
AS
1524static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, size_t offset,
1525 size_t len, int ea_off,
60f4142a 1526 struct pci_child_plat *pdata)
0b143d8a
AM
1527{
1528 int ea_cnt, i, entry_size;
1529 int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
1530 u32 ea_entry;
1531 phys_addr_t addr;
1532
51eeae91
SG
1533 if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
1534 /*
1535 * In the case of a Virtual Function device, device is
1536 * Physical function, so pdata will point to required VF
1537 * specific data.
1538 */
1539 if (pdata->is_virtfn)
1540 bar_id += PCI_EA_BEI_VF_BAR0;
1541 }
1542
0b143d8a
AM
1543 /* EA capability structure header */
1544 dm_pci_read_config32(dev, ea_off, &ea_entry);
1545 ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
1546 ea_off += PCI_EA_FIRST_ENT;
1547
1548 for (i = 0; i < ea_cnt; i++, ea_off += entry_size) {
1549 /* Entry header */
1550 dm_pci_read_config32(dev, ea_off, &ea_entry);
1551 entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2;
1552
1553 if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id)
1554 continue;
1555
1556 /* Base address, 1st DW */
1557 dm_pci_read_config32(dev, ea_off + 4, &ea_entry);
1558 addr = ea_entry & PCI_EA_FIELD_MASK;
1559 if (ea_entry & PCI_EA_IS_64) {
1560 /* Base address, 2nd DW, skip over 4B MaxOffset */
1561 dm_pci_read_config32(dev, ea_off + 12, &ea_entry);
1562 addr |= ((u64)ea_entry) << 32;
1563 }
1564
51eeae91
SG
1565 if (IS_ENABLED(CONFIG_PCI_SRIOV))
1566 addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
1567
12507a2d
AS
1568 if (~((phys_addr_t)0) - addr < offset)
1569 return NULL;
1570
0b143d8a 1571 /* size ignored for now */
12507a2d 1572 return map_physmem(addr + offset, len, MAP_NOCACHE);
0b143d8a
AM
1573 }
1574
1575 return 0;
1576}
1577
12507a2d 1578void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len,
2635e3b5 1579 unsigned long mask, unsigned long flags)
21d1fe7e 1580{
8a8d24bd 1581 struct pci_child_plat *pdata = dev_get_parent_plat(dev);
51eeae91 1582 struct udevice *udev = dev;
21d1fe7e
SG
1583 pci_addr_t pci_bus_addr;
1584 u32 bar_response;
0b143d8a
AM
1585 int ea_off;
1586
51eeae91
SG
1587 if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
1588 /*
1589 * In case of Virtual Function devices, use PF udevice
1590 * as EA capability is defined in Physical Function
1591 */
1592 if (pdata->is_virtfn)
1593 udev = pdata->pfdev;
1594 }
1595
0b143d8a
AM
1596 /*
1597 * if the function supports Enhanced Allocation use that instead of
1598 * BARs
51eeae91
SG
1599 * Incase of virtual functions, pdata will help read VF BEI
1600 * and EA entry size.
0b143d8a 1601 */
3b920186
AS
1602 if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
1603 ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
1604 else
1605 ea_off = 0;
1606
0b143d8a 1607 if (ea_off)
12507a2d 1608 return dm_pci_map_ea_bar(udev, bar, offset, len, ea_off, pdata);
21d1fe7e
SG
1609
1610 /* read BAR address */
51eeae91 1611 dm_pci_read_config32(udev, bar, &bar_response);
21d1fe7e
SG
1612 pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
1613
04083470
MF
1614 /* This has a lot of baked in assumptions, but essentially tries
1615 * to mirror the behavior of BAR assignment for 64 Bit enabled
1616 * hosts and 64 bit placeable BARs in the auto assign code.
1617 */
1618#if defined(CONFIG_SYS_PCI_64BIT)
1619 if (bar_response & PCI_BASE_ADDRESS_MEM_TYPE_64) {
1620 dm_pci_read_config32(udev, bar + 4, &bar_response);
1621 pci_bus_addr |= (pci_addr_t)bar_response << 32;
1622 }
1623#endif /* CONFIG_SYS_PCI_64BIT */
1624
12507a2d
AS
1625 if (~((pci_addr_t)0) - pci_bus_addr < offset)
1626 return NULL;
1627
21d1fe7e 1628 /*
12507a2d
AS
1629 * Forward the length argument to dm_pci_bus_to_virt. The length will
1630 * be used to check that the entire address range has been declared as
1631 * a PCI range, but a better check would be to probe for the size of
1632 * the bar and prevent overflow more locally.
21d1fe7e 1633 */
2635e3b5
AS
1634 return dm_pci_bus_to_virt(udev, pci_bus_addr + offset, len, mask, flags,
1635 MAP_NOCACHE);
21d1fe7e
SG
1636}
1637
a8c5f8d3 1638static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
dac01fd8 1639{
dac01fd8
BM
1640 int ttl = PCI_FIND_CAP_TTL;
1641 u8 id;
1642 u16 ent;
dac01fd8
BM
1643
1644 dm_pci_read_config8(dev, pos, &pos);
a8c5f8d3 1645
dac01fd8
BM
1646 while (ttl--) {
1647 if (pos < PCI_STD_HEADER_SIZEOF)
1648 break;
1649 pos &= ~3;
1650 dm_pci_read_config16(dev, pos, &ent);
1651
1652 id = ent & 0xff;
1653 if (id == 0xff)
1654 break;
1655 if (id == cap)
1656 return pos;
1657 pos = (ent >> 8);
1658 }
1659
1660 return 0;
1661}
1662
a8c5f8d3
BM
1663int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
1664{
1665 return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT,
1666 cap);
1667}
1668
1669int dm_pci_find_capability(struct udevice *dev, int cap)
1670{
1671 u16 status;
1672 u8 header_type;
1673 u8 pos;
1674
1675 dm_pci_read_config16(dev, PCI_STATUS, &status);
1676 if (!(status & PCI_STATUS_CAP_LIST))
1677 return 0;
1678
1679 dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
1680 if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
1681 pos = PCI_CB_CAPABILITY_LIST;
1682 else
1683 pos = PCI_CAPABILITY_LIST;
1684
1685 return _dm_pci_find_next_capability(dev, pos, cap);
1686}
1687
/*
 * dm_pci_find_next_ext_capability() - walk the PCIe extended capability list
 *
 * Scans the extended config space (beyond the first 256 bytes) for the
 * extended capability @cap, starting at @start or at the beginning of the
 * extended space if @start is 0. Traversal is bounded by the maximum
 * possible number of 8-byte entries.
 *
 * @dev: device whose config space is scanned
 * @start: offset to continue from, or 0 to start fresh
 * @cap: extended capability ID (PCI_EXT_CAP_ID_*) to find
 * Return: offset of the capability, or 0 if not found
 */
int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	dm_pci_read_config32(dev, pos, &header);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl--) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		/* A next pointer inside legacy config space ends the list */
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		dm_pci_read_config32(dev, pos, &header);
	}

	return 0;
}
1721
a8c5f8d3
BM
1722int dm_pci_find_ext_capability(struct udevice *dev, int cap)
1723{
1724 return dm_pci_find_next_ext_capability(dev, 0, cap);
1725}
1726
b8e1f827
AM
1727int dm_pci_flr(struct udevice *dev)
1728{
1729 int pcie_off;
1730 u32 cap;
1731
1732 /* look for PCI Express Capability */
1733 pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
1734 if (!pcie_off)
1735 return -ENOENT;
1736
1737 /* check FLR capability */
1738 dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap);
1739 if (!(cap & PCI_EXP_DEVCAP_FLR))
1740 return -ENOENT;
1741
1742 dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0,
1743 PCI_EXP_DEVCTL_BCR_FLR);
1744
1745 /* wait 100ms, per PCI spec */
1746 mdelay(100);
1747
1748 return 0;
1749}
1750
b8852dcf
SG
1751#if defined(CONFIG_PCI_SRIOV)
1752int pci_sriov_init(struct udevice *pdev, int vf_en)
1753{
1754 u16 vendor, device;
1755 struct udevice *bus;
1756 struct udevice *dev;
1757 pci_dev_t bdf;
1758 u16 ctrl;
1759 u16 num_vfs;
1760 u16 total_vf;
1761 u16 vf_offset;
1762 u16 vf_stride;
1763 int vf, ret;
1764 int pos;
1765
1766 pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1767 if (!pos) {
1768 debug("Error: SRIOV capability not found\n");
1769 return -ENOENT;
1770 }
1771
1772 dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
1773
1774 dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
1775 if (vf_en > total_vf)
1776 vf_en = total_vf;
1777 dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);
1778
1779 ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
1780 dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);
1781
1782 dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
1783 if (num_vfs > vf_en)
1784 num_vfs = vf_en;
1785
1786 dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
1787 dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);
1788
1789 dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
1790 dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);
1791
1792 bdf = dm_pci_get_bdf(pdev);
1793
58ddb937
MS
1794 ret = pci_get_bus(PCI_BUS(bdf), &bus);
1795 if (ret)
1796 return ret;
b8852dcf
SG
1797
1798 bdf += PCI_BDF(0, 0, vf_offset);
1799
1800 for (vf = 0; vf < num_vfs; vf++) {
8a8d24bd 1801 struct pci_child_plat *pplat;
b8852dcf
SG
1802 ulong class;
1803
1804 pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
1805 &class, PCI_SIZE_16);
1806
1807 debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
8b85dfc6 1808 dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
b8852dcf
SG
1809
1810 /* Find this device in the device tree */
1811 ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
1812
1813 if (ret == -ENODEV) {
1814 struct pci_device_id find_id;
1815
1816 memset(&find_id, '\0', sizeof(find_id));
1817 find_id.vendor = vendor;
1818 find_id.device = device;
1819 find_id.class = class;
1820
1821 ret = pci_find_and_bind_driver(bus, &find_id,
1822 bdf, &dev);
1823
1824 if (ret)
1825 return ret;
1826 }
1827
1828 /* Update the platform data */
caa4daa2 1829 pplat = dev_get_parent_plat(dev);
b8852dcf
SG
1830 pplat->devfn = PCI_MASK_BUS(bdf);
1831 pplat->vendor = vendor;
1832 pplat->device = device;
1833 pplat->class = class;
1834 pplat->is_virtfn = true;
1835 pplat->pfdev = pdev;
1836 pplat->virtid = vf * vf_stride + vf_offset;
1837
1838 debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
8b85dfc6 1839 __func__, dev_seq(dev), dev->name, PCI_DEV(bdf),
b8852dcf
SG
1840 PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
1841 bdf += PCI_BDF(0, 0, vf_stride);
1842 }
1843
1844 return 0;
1845}
1846
1847int pci_sriov_get_totalvfs(struct udevice *pdev)
1848{
1849 u16 total_vf;
1850 int pos;
1851
1852 pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1853 if (!pos) {
1854 debug("Error: SRIOV capability not found\n");
1855 return -ENOENT;
1856 }
1857
1858 dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
1859
1860 return total_vf;
1861}
1862#endif /* SRIOV */
1863
ff3e077b
SG
1864UCLASS_DRIVER(pci) = {
1865 .id = UCLASS_PCI,
1866 .name = "pci",
42f3663a 1867 .flags = DM_UC_FLAG_SEQ_ALIAS | DM_UC_FLAG_NO_AUTO_SEQ,
91195485 1868 .post_bind = dm_scan_fdt_dev,
ff3e077b
SG
1869 .pre_probe = pci_uclass_pre_probe,
1870 .post_probe = pci_uclass_post_probe,
1871 .child_post_bind = pci_uclass_child_post_bind,
41575d8e 1872 .per_device_auto = sizeof(struct pci_controller),
8a8d24bd 1873 .per_child_plat_auto = sizeof(struct pci_child_plat),
ff3e077b
SG
1874};
1875
1876static const struct dm_pci_ops pci_bridge_ops = {
1877 .read_config = pci_bridge_read_config,
1878 .write_config = pci_bridge_write_config,
1879};
1880
1881static const struct udevice_id pci_bridge_ids[] = {
1882 { .compatible = "pci-bridge" },
1883 { }
1884};
1885
1886U_BOOT_DRIVER(pci_bridge_drv) = {
1887 .name = "pci_bridge_drv",
1888 .id = UCLASS_PCI,
1889 .of_match = pci_bridge_ids,
1890 .ops = &pci_bridge_ops,
1891};
1892
1893UCLASS_DRIVER(pci_generic) = {
1894 .id = UCLASS_PCI_GENERIC,
1895 .name = "pci_generic",
1896};
1897
1898static const struct udevice_id pci_generic_ids[] = {
1899 { .compatible = "pci-generic" },
1900 { }
1901};
1902
1903U_BOOT_DRIVER(pci_generic_drv) = {
1904 .name = "pci_generic_drv",
1905 .id = UCLASS_PCI_GENERIC,
1906 .of_match = pci_generic_ids,
1907};
e578b92c 1908
b9f6d0f7 1909int pci_init(void)
e578b92c
SW
1910{
1911 struct udevice *bus;
1912
1913 /*
1914 * Enumerate all known controller devices. Enumeration has the side-
1915 * effect of probing them, so PCIe devices will be enumerated too.
1916 */
60ee6094 1917 for (uclass_first_device_check(UCLASS_PCI, &bus);
e578b92c 1918 bus;
60ee6094 1919 uclass_next_device_check(&bus)) {
e578b92c
SW
1920 ;
1921 }
b9f6d0f7
OP
1922
1923 return 0;
e578b92c 1924}