// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of_device.h>

#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>

#include "pci_impl.h"

#define DRIVER_NAME	"fire"
#define PFX		DRIVER_NAME ": "

#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL

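/* Set up the Fire IOMMU for one PBM: invalidate the TLB, program the
 * TSB base, and enable translation.  Fire has no "virtual-dma" OBP
 * property, so a fixed 1GB DVMA window at 0xc0000000 is used.
 */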
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size.  */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses.  */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */	|
		    0x00000300 /* Cache mode */			|
		    0x00000002 /* Bypass enable */		|
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}

#ifdef CONFIG_PCI_MSI
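/* Fire reports MSI/MSI-X arrivals through in-memory event queues.
 * Each 64-byte queue entry carries the format/type, requester ID,
 * address and data of one inbound message.
 */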
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define  EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define  EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define  EVENT_QUEUE_STATE_ERROR	0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define  EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define  MSI_MAP_VALID			0x8000000000000000UL
#define  MSI_MAP_EQWR_N			0x4000000000000000UL
#define  MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define  MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define  IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define  IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define  MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define  MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL

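/* Read the hardware head pointer of event queue 'msiqid'.  */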
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

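/* Pull one MSI out of an event queue, if available.  Each queue is
 * 8192 bytes (128 of the 64-byte entries, matching the 7-bit head and
 * tail fields above).  Returns 1 if an MSI was dequeued, 0 if the
 * queue is empty, and -EINVAL on an unexpected entry type.
 */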
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry.  */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring.  */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

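/* Bind MSI 'msi' to event queue 'msiqid': point the mapping entry at
 * the queue, clear any pending state, then mark the mapping valid.
 * The 'is_msi64' argument is required by the sparc64_msiq_ops
 * signature but is not needed by this hardware path.
 */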
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));

	val &= ~MSI_MAP_VALID;

	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

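/* Allocate and program the event queues: one physically contiguous
 * 512KB block backs all of them.  The queue base address register,
 * the interrupt mondo data, and the 32-bit/64-bit MSI address ranges
 * are programmed here, and every queue's head and tail are reset.
 */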
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

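/* Wire event queue 'msiqid' (signalled via interrupt number 'devino')
 * into the kernel IRQ layer: enable the INO's IMAP entry, build an
 * irq from the adjusted INO, then enable the event queue itself.
 */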
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!irq)
		return -ENOMEM;

	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return irq;
}

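/* Hook the routines above into the generic sparc64 MSI queue layer
 * (sparc64_pbm_msi_init), which drives them from the common MSI
 * entry points.
 */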
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	=	pci_fire_get_head,
	.dequeue_msi	=	pci_fire_dequeue_msi,
	.set_head	=	pci_fire_set_head,
	.msi_setup	=	pci_fire_msi_setup,
	.msi_teardown	=	pci_fire_msi_teardown,
	.msiq_alloc	=	pci_fire_msiq_alloc,
	.msiq_free	=	pci_fire_msiq_free,
	.msiq_build_irq	=	pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define  FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define  FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define  FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define  FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define  FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define  FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define  FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define  FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define  FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define  FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define  FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define  FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define  FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define  FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define  FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define  FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL

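/* One-time controller bring-up: enable parity checking, arm the fatal
 * reset sources and core interrupts, then configure the TLU
 * (transaction layer) and LPU (link layer) register blocks of the
 * PCI-E port and unmask the DMC and PEC interrupt enables.
 */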
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}

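/* Initialize one PCI bus module: fill in the pci_pbm_info from the
 * device tree "reg" property, bring the hardware up, initialize the
 * IOMMU and MSI support, then scan the bus and link the PBM into the
 * global list.
 */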
static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
			     struct platform_device *op, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = -1;

	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->op = op;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

	/* XXX register error interrupt handlers XXX */

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

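/* Platform driver probe: allocate the PBM and its IOMMU state for one
 * "pciex108e,80f0" node, then hand off to pci_fire_pbm_init().
 */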
static int fire_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	u32 portid;
	int err;

	portid = of_getintprop_default(dp, "portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_fire_pbm_init(pbm, op, portid);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};

static struct platform_driver fire_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fire_match,
	},
	.probe		= fire_probe,
};

static int __init fire_init(void)
{
	return platform_driver_register(&fire_driver);
}

subsys_initcall(fire_init);