// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pci-ecam.h>

#include "../pci.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1)

/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN		0
#define XILINX_CPM_PCIE_INTR_HOT_RESET		3
#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT	4
#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT	8
#define XILINX_CPM_PCIE_INTR_CORRECTABLE	9
#define XILINX_CPM_PCIE_INTR_NONFATAL		10
#define XILINX_CPM_PCIE_INTR_FATAL		11
#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON	12
#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD	15
#define XILINX_CPM_PCIE_INTR_INTX		16
#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD	17
#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP		20
#define XILINX_CPM_PCIE_INTR_SLV_UNEXP		21
#define XILINX_CPM_PCIE_INTR_SLV_COMPL		22
#define XILINX_CPM_PCIE_INTR_SLV_ERRP		23
#define XILINX_CPM_PCIE_INTR_SLV_CMPABT		24
#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR		25
#define XILINX_CPM_PCIE_INTR_MST_DECERR		26
#define XILINX_CPM_PCIE_INTR_MST_SLVERR		27
#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT	28

#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK \
        ( \
                IMR(LINK_DOWN) | \
                IMR(HOT_RESET) | \
                IMR(CFG_PCIE_TIMEOUT) | \
                IMR(CFG_TIMEOUT) | \
                IMR(CORRECTABLE) | \
                IMR(NONFATAL) | \
                IMR(FATAL) | \
                IMR(CFG_ERR_POISON) | \
                IMR(PME_TO_ACK_RCVD) | \
                IMR(INTX) | \
                IMR(PM_PME_RCVD) | \
                IMR(SLV_UNSUPP) | \
                IMR(SLV_UNEXP) | \
                IMR(SLV_COMPL) | \
                IMR(SLV_ERRP) | \
                IMR(SLV_CMPABT) | \
                IMR(SLV_ILLBUR) | \
                IMR(MST_DECERR) | \
                IMR(MST_SLVERR) | \
                IMR(SLV_PCIE_TIMEOUT) \
        )

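/*
 * XILINX_CPM_PCIE_IDR_ALL_MASK selects every bit of the interrupt decode
 * register; the IDRN bits in [19:16] carry the per-INTx (INTA..INTD)
 * status and are enabled/disabled through the IDRN mask register, as used
 * by the INTx mask/unmask and demux handlers below.
 */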
#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

/**
 * struct xilinx_cpm_pcie_port - PCIe port information
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) Base
 * @dev: Device pointer
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 */
struct xilinx_cpm_pcie_port {
        void __iomem *reg_base;
        void __iomem *cpm_base;
        struct device *dev;
        struct irq_domain *intx_domain;
        struct irq_domain *cpm_domain;
        struct pci_config_window *cfg;
        int intx_irq;
        int irq;
        raw_spinlock_t lock;
};

static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
{
        return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie_port *port,
                       u32 val, u32 reg)
{
        writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
{
        return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
                XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

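/*
 * Report and clear a pending entry in the Root Port Error FIFO, logging
 * the requester ID of the offending request.
 */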
static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
{
        unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

        if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
                dev_dbg(port->dev, "Requester ID %lu\n",
                        val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
                pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
                           XILINX_CPM_PCIE_REG_RPEFR);
        }
}

static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
        struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

        mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
        raw_spin_lock_irqsave(&port->lock, flags);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
        pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
        raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
        struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

        mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
        raw_spin_lock_irqsave(&port->lock, flags);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
        pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
        raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
        .name = "INTx",
        .irq_mask = xilinx_cpm_mask_leg_irq,
        .irq_unmask = xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
                                    unsigned int irq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        irq_set_status_flags(irq, IRQ_LEVEL);

        return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
        .map = xilinx_cpm_pcie_intx_map,
};

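/*
 * Chained handler for the INTx event: read the pending INTA..INTD bits
 * from IDRN and forward each one into the INTx IRQ domain.
 */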
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
        struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long val;
        int i;

        chained_irq_enter(chip, desc);

        val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
                        pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

        for_each_set_bit(i, &val, PCI_NUM_INTX)
                generic_handle_domain_irq(port->intx_domain, i);

        chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
        struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
        u32 val;

        raw_spin_lock(&port->lock);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        val &= ~BIT(d->hwirq);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
        raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
        struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
        u32 val;

        raw_spin_lock(&port->lock);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        val |= BIT(d->hwirq);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
        raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
        .name = "RC-Event",
        .irq_mask = xilinx_cpm_mask_event_irq,
        .irq_unmask = xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
                                     unsigned int irq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        irq_set_status_flags(irq, IRQ_LEVEL);
        return 0;
}

static const struct irq_domain_ops event_domain_ops = {
        .map = xilinx_cpm_pcie_event_map,
};

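/*
 * Chained handler for the CPM error/event interrupt: demultiplex the
 * enabled bits in IDR into the event IRQ domain, acknowledge them, and
 * then clear the pending interrupt status in the CPM SLCR block.
 */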
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
        struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long val;
        int i;

        chained_irq_enter(chip, desc);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
        val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        for_each_set_bit(i, &val, 32)
                generic_handle_domain_irq(port->cpm_domain, i);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

        /*
         * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
         * CPM SLCR block.
         */
        val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
        if (val)
                writel_relaxed(val,
                               port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

        chained_irq_exit(chip, desc);
}

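/*
 * Names for the reportable events, indexed by hardware event number.
 * The symbolic name is used when requesting the per-event IRQ and the
 * string when logging the event in the handler.
 */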
#define _IC(x, s) \
        [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
        const char *sym;
        const char *str;
} intr_cause[32] = {
        _IC(LINK_DOWN, "Link Down"),
        _IC(HOT_RESET, "Hot reset"),
        _IC(CFG_TIMEOUT, "ECAM access timeout"),
        _IC(CORRECTABLE, "Correctable error message"),
        _IC(NONFATAL, "Non fatal error message"),
        _IC(FATAL, "Fatal error message"),
        _IC(SLV_UNSUPP, "Slave unsupported request"),
        _IC(SLV_UNEXP, "Slave unexpected completion"),
        _IC(SLV_COMPL, "Slave completion timeout"),
        _IC(SLV_ERRP, "Slave Error Poison"),
        _IC(SLV_CMPABT, "Slave Completer Abort"),
        _IC(SLV_ILLBUR, "Slave Illegal Burst"),
        _IC(MST_DECERR, "Master decode error"),
        _IC(MST_SLVERR, "Master slave error"),
        _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"),
        _IC(CFG_ERR_POISON, "ECAM poisoned completion received"),
        _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
        _IC(PM_PME_RCVD, "PM_PME message received"),
        _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"),
};

static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
        struct xilinx_cpm_pcie_port *port = dev_id;
        struct device *dev = port->dev;
        struct irq_data *d;

        d = irq_domain_get_irq_data(port->cpm_domain, irq);

        switch (d->hwirq) {
        case XILINX_CPM_PCIE_INTR_CORRECTABLE:
        case XILINX_CPM_PCIE_INTR_NONFATAL:
        case XILINX_CPM_PCIE_INTR_FATAL:
                cpm_pcie_clear_err_interrupts(port);
                fallthrough;

        default:
                if (intr_cause[d->hwirq].str)
                        dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
                else
                        dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
        }

        return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
{
        if (port->intx_domain) {
                irq_domain_remove(port->intx_domain);
                port->intx_domain = NULL;
        }

        if (port->cpm_domain) {
                irq_domain_remove(port->cpm_domain);
                port->cpm_domain = NULL;
        }
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
{
        struct device *dev = port->dev;
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node;

        /* Setup INTx */
        pcie_intc_node = of_get_next_child(node, NULL);
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -EINVAL;
        }

        port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
                                                 &event_domain_ops,
                                                 port);
        if (!port->cpm_domain)
                goto out;

        irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

        port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                  &intx_domain_ops,
                                                  port);
        if (!port->intx_domain)
                goto out;

        irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

        of_node_put(pcie_intc_node);
        raw_spin_lock_init(&port->lock);

        return 0;
out:
        xilinx_cpm_free_irq_domains(port);
        of_node_put(pcie_intc_node);
        dev_err(dev, "Failed to allocate IRQ domains\n");

        return -ENOMEM;
}

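/*
 * Map each named event onto the event IRQ domain and request a handler
 * for it, then install the chained handlers for the INTx demux and for
 * the main CPM error/event interrupt.
 */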
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
{
        struct device *dev = port->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int i, irq;

        port->irq = platform_get_irq(pdev, 0);
        if (port->irq < 0)
                return port->irq;

        for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
                int err;

                if (!intr_cause[i].str)
                        continue;

                irq = irq_create_mapping(port->cpm_domain, i);
                if (!irq) {
                        dev_err(dev, "Failed to map interrupt\n");
                        return -ENXIO;
                }

                err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
                                       0, intr_cause[i].sym, port);
                if (err) {
                        dev_err(dev, "Failed to request IRQ %d\n", irq);
                        return err;
                }
        }

        port->intx_irq = irq_create_mapping(port->cpm_domain,
                                            XILINX_CPM_PCIE_INTR_INTX);
        if (!port->intx_irq) {
                dev_err(dev, "Failed to map INTx interrupt\n");
                return -ENXIO;
        }

        /* Plug the INTx chained handler */
        irq_set_chained_handler_and_data(port->intx_irq,
                                         xilinx_cpm_pcie_intx_flow, port);

        /* Plug the main event chained handler */
        irq_set_chained_handler_and_data(port->irq,
                                         xilinx_cpm_pcie_event_flow, port);

        return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
{
        if (cpm_pcie_link_up(port))
                dev_info(port->dev, "PCIe Link is UP\n");
        else
                dev_info(port->dev, "PCIe Link is DOWN\n");

        /* Disable all interrupts */
        pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
                   XILINX_CPM_PCIE_REG_IMR);

        /* Clear pending interrupts */
        pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
                   XILINX_CPM_PCIE_IMR_ALL_MASK,
                   XILINX_CPM_PCIE_REG_IDR);

        /*
         * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
         * CPM SLCR block.
         */
        writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
               port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
        /* Enable the Bridge enable bit */
        pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
                   XILINX_CPM_PCIE_REG_RPSC_BEN,
                   XILINX_CPM_PCIE_REG_RPSC);
}

/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
                                    struct resource *bus_range)
{
        struct device *dev = port->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;

        port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
                                                                "cpm_slcr");
        if (IS_ERR(port->cpm_base))
                return PTR_ERR(port->cpm_base);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
        if (!res)
                return -ENXIO;

        port->cfg = pci_ecam_create(dev, res, bus_range,
                                    &pci_generic_ecam_ops);
        if (IS_ERR(port->cfg))
                return PTR_ERR(port->cfg);

        port->reg_base = port->cfg->win;

        return 0;
}

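/*
 * Tear down the chained INTx and event handlers installed by
 * xilinx_cpm_setup_irq() (used on the probe error path).
 */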
static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
{
        irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
        irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
        struct xilinx_cpm_pcie_port *port;
        struct device *dev = &pdev->dev;
        struct pci_host_bridge *bridge;
        struct resource_entry *bus;
        int err;

        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
        if (!bridge)
                return -ENODEV;

        port = pci_host_bridge_priv(bridge);

        port->dev = dev;

        err = xilinx_cpm_pcie_init_irq_domain(port);
        if (err)
                return err;

        bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
        if (!bus)
                return -ENODEV;

        err = xilinx_cpm_pcie_parse_dt(port, bus->res);
        if (err) {
                dev_err(dev, "Parsing DT failed\n");
                goto err_parse_dt;
        }

        xilinx_cpm_pcie_init_port(port);

        err = xilinx_cpm_setup_irq(port);
        if (err) {
                dev_err(dev, "Failed to set up interrupts\n");
                goto err_setup_irq;
        }

        bridge->sysdata = port->cfg;
        bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

        err = pci_host_probe(bridge);
        if (err < 0)
                goto err_host_bridge;

        return 0;

err_host_bridge:
        xilinx_cpm_free_interrupts(port);
err_setup_irq:
        pci_ecam_free(port->cfg);
err_parse_dt:
        xilinx_cpm_free_irq_domains(port);
        return err;
}

static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
        { .compatible = "xlnx,versal-cpm-host-1.00", },
        {}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
        .driver = {
                .name = "xilinx-cpm-pcie",
                .of_match_table = xilinx_cpm_pcie_of_match,
                .suppress_bind_attrs = true,
        },
        .probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);