/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"

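/* Register an upper-layer protocol (ULP) driver with the L2 driver.
 * For the RoCE ULP, fail if there are no spare statistics contexts to
 * share with the RDMA driver.
 */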
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->cp_nr_rings == max_stat_ctxs)
			return -ENOMEM;
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}

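/* Undo a ULP registration: free any MSI-X vectors held by the RoCE ULP,
 * re-register with firmware to clear the ULP's async events, then wait
 * (up to ~1 second) for outstanding references to drop.
 */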
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
		edev->en_ops->bnxt_free_msix(edev, ulp_id);

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}

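/* Fill in the MSI-X vector table handed to the ULP: one entry per granted
 * vector, with the IRQ vector, completion ring index and doorbell offset
 * (fixed per-function on P5 chips, per-ring otherwise).
 */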
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, idx, i;

	num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[idx + i].vector;
		ent[i].ring_idx = idx + i;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			ent[i].db_offset = DB_PF_OFFSET_P5;
			if (BNXT_VF(bp))
				ent[i].db_offset = DB_VF_OFFSET_P5;
		} else {
			ent[i].db_offset = (idx + i) * 0x80;
		}
	}
}

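/* Reserve MSI-X vectors for the RoCE ULP.  If the current IRQ or ring
 * reservations cannot cover the request, the NIC is closed and reopened
 * (or rings re-reserved) to grow them.  Returns the number of vectors
 * actually granted, or a negative error.
 */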
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
			      struct bnxt_msix_entry *ent, int num_msix)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc;
	int max_idx, max_cp_rings;
	int avail_msix, idx;
	int total_vecs;
	int rc = 0;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		return -ENODEV;

	if (edev->ulp_tbl[ulp_id].msix_requested)
		return -EAGAIN;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	avail_msix = bnxt_get_avail_msix(bp, num_msix);
	if (!avail_msix)
		return -ENOMEM;
	if (avail_msix > num_msix)
		avail_msix = num_msix;

	if (BNXT_NEW_RM(bp)) {
		idx = bp->cp_nr_rings;
	} else {
		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
		idx = max_idx - avail_msix;
	}
	edev->ulp_tbl[ulp_id].msix_base = idx;
	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	hw_resc = &bp->hw_resc;
	total_vecs = idx + avail_msix;
	if (bp->total_irqs < total_vecs ||
	    (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
		if (netif_running(dev)) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_reserve_rings(bp, true);
		}
	}
	if (rc) {
		edev->ulp_tbl[ulp_id].msix_requested = 0;
		return -EAGAIN;
	}

	if (BNXT_NEW_RM(bp)) {
		int resv_msix;

		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
		avail_msix = min_t(int, resv_msix, avail_msix);
		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	}
	bnxt_fill_msix_vecs(bp, ent);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
	return avail_msix;
}

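/* Return the RoCE ULP's MSI-X vectors to the L2 driver, restarting the
 * NIC if it is running so the rings can be re-sized.
 */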
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return 0;

	edev->ulp_tbl[ulp_id].msix_requested = 0;
	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
		bnxt_close_nic(bp, true, false);
		bnxt_open_nic(bp, true, false);
	}
	return 0;
}

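/* Number of MSI-X vectors currently granted to the RoCE ULP. */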
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	}
	return 0;
}

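/* First IRQ table index of the vectors granted to the RoCE ULP. */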
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	}
	return 0;
}

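/* Number of statistics contexts set aside for the RoCE ULP. */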
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
			return BNXT_MIN_ROCE_STAT_CTXS;
	}

	return 0;
}

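/* Send an HWRM message supplied by the ULP to firmware, copying back up
 * to resp_max_len bytes of the response.  Rejected while a firmware reset
 * is in progress, except for the RoCE ULP.
 */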
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
			 struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct output *resp;
	struct input *req;
	u32 resp_len;
	int rc;

	if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
		return -EBUSY;

	rc = hwrm_req_init(bp, req, 0 /* don't care */);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
	if (rc)
		goto drop_req;

	hwrm_req_timeout(bp, req, fw_msg->timeout);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	resp_len = le16_to_cpu(resp->resp_len);
	if (resp_len) {
		if (fw_msg->resp_max_len < resp_len)
			resp_len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, resp_len);
	}
drop_req:
	/* A request initialized with hwrm_req_init() must be released with
	 * hwrm_req_drop() on every exit path, or it is leaked.
	 */
	hwrm_req_drop(bp, req);
	return rc;
}

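/* Reference counting keeps a ULP's handle in use while a callback found
 * under RCU is still running (see bnxt_ulp_sriov_cfg()).
 */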
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
	atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
	atomic_dec(&ulp->ref_count);
}

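/* Quiesce all registered ULPs and mark them stopped; the L2 driver calls
 * this (e.g. around firmware resets and suspend) before the device
 * becomes temporarily unusable.
 */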
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_stop)
			continue;
		ops->ulp_stop(ulp->handle);
	}
}

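/* Restart every registered ULP once the L2 device is usable again.  The
 * STOPPED flag is cleared even on error so a later start is not blocked,
 * but the ULP callbacks only run on success.
 */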
void bnxt_ulp_start(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;

	if (err)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_start)
			continue;
		ops->ulp_start(ulp->handle);
	}
}

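/* Notify ULPs of a change in the number of VFs.  The ops pointer is
 * looked up under RCU and pinned with a ref count so the callback, which
 * may sleep, runs outside the RCU read-side section.
 */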
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		rcu_read_lock();
		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_sriov_config) {
			rcu_read_unlock();
			continue;
		}
		bnxt_ulp_get(ulp);
		rcu_read_unlock();
		ops->ulp_sriov_config(ulp->handle, num_vfs);
		bnxt_ulp_put(ulp);
	}
}

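/* Invoke each ULP's shutdown handler during device shutdown. */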
void bnxt_ulp_shutdown(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_shutdown)
			continue;
		ops->ulp_shutdown(ulp->handle);
	}
}

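/* Tell the RoCE ULP to stop using its MSI-X vectors, before the L2
 * driver re-initializes its interrupts.
 */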
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		ops->ulp_irq_stop(ulp->handle);
	}
}

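/* Hand the (possibly re-mapped) MSI-X vectors back to the RoCE ULP.  On
 * error, the ULP is restarted with a NULL table so it knows the vectors
 * are gone.
 */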
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

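/* Dispatch a firmware async event completion to every ULP that has
 * registered for that event id in its bitmap.
 */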
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	rcu_read_lock();
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_async_notifier)
			continue;
		if (!ulp->async_events_bmap ||
		    event_id > ulp->max_async_event_id)
			continue;

		/* Read max_async_event_id first before testing the bitmap. */
		smp_rmb();
		if (test_bit(event_id, ulp->async_events_bmap))
			ops->ulp_async_notifier(ulp->handle, cmpl);
	}
	rcu_read_unlock();
}

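/* Record which firmware async events a ULP wants and forward the bitmap
 * to firmware via the driver registration HWRM call.
 */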
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
				      unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
	return 0;
}

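/* Ops table exposed to ULPs through the shared bnxt_en_dev. */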
static const struct bnxt_en_ops bnxt_en_ops_tbl = {
	.bnxt_register_device = bnxt_register_dev,
	.bnxt_unregister_device = bnxt_unregister_dev,
	.bnxt_request_msix = bnxt_req_msix_vecs,
	.bnxt_free_msix = bnxt_free_msix_vecs,
	.bnxt_send_fw_msg = bnxt_send_msg,
	.bnxt_register_fw_async_events = bnxt_register_async_events,
};

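/* Entry point used by the RDMA driver to obtain the shared bnxt_en_dev,
 * allocating it on first use and refreshing the RoCE capability flags to
 * match the L2 driver's.
 */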
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_en_dev *edev;

	edev = bp->edev;
	if (!edev) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev)
			return ERR_PTR(-ENOMEM);
		edev->en_ops = &bnxt_en_ops_tbl;
		edev->net = dev;
		edev->pdev = bp->pdev;
		edev->l2_db_size = bp->db_size;
		edev->l2_db_size_nc = bp->db_size;
		bp->edev = edev;
	}
	edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
	if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
	if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
	return bp->edev;
}
EXPORT_SYMBOL(bnxt_ulp_probe);