Searched refs:rcd (Results 1 – 25 of 56) sorted by relevance

/linux/drivers/infiniband/hw/hfi1/
init.c
179 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
182 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
238 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
291 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
337 rcd->do_interrupt = rcd->slow_handler; in hfi1_create_ctxtdata()
404 rcd->ctxt, rcd->egrbufs.count); in hfi1_create_ctxtdata()
436 rcd->ctxt, rcd->egrbufs.size); in hfi1_create_ctxtdata()
1095 rcd->rcvhdrq, rcd->rcvhdrq_dma); in hfi1_free_ctxtdata()
1794 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
1939 rcd->ctxt, rcd->egrbufs.alloced, in hfi1_setup_eagerbufs()
[all …]
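
The init.c matches cover the receive-context (rcd) lifecycle: allocate_rcd_index() claims a slot in the per-device dd->rcd[] table, hfi1_rcd_get_by_index() looks a slot up again, and hfi1_rcd_free() clears it. A minimal sketch of the lookup half of that pattern, assuming hfi1_rcd_get() is a kref_get_unless_zero()-style helper returning nonzero on success and that a dd->uctxt_lock spinlock guards the table (both are assumptions; neither detail is in the excerpt):

    /* Sketch: return the context at slot 'ctxt' with a reference held,
     * or NULL if the slot is empty or already being torn down. The lock
     * keeps the table stable while the reference is taken; callers
     * release it with hfi1_rcd_put(). */
    static struct hfi1_ctxtdata *rcd_lookup(struct hfi1_devdata *dd, u16 ctxt)
    {
            struct hfi1_ctxtdata *rcd = NULL;
            unsigned long flags;

            spin_lock_irqsave(&dd->uctxt_lock, flags);
            if (dd->rcd[ctxt] && hfi1_rcd_get(dd->rcd[ctxt]))
                    rcd = dd->rcd[ctxt];
            spin_unlock_irqrestore(&dd->uctxt_lock, flags);
            return rcd;
    }
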
aspm.c
172 struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer); in aspm_ctx_timer_function() local
176 aspm_enable_dec(rcd->dd); in aspm_ctx_timer_function()
193 if (rcd) { in aspm_disable_all()
198 hfi1_rcd_put(rcd); in aspm_disable_all()
220 if (rcd) { in aspm_enable_all()
225 hfi1_rcd_put(rcd); in aspm_enable_all()
234 rcd->aspm_intr_supported = rcd->dd->aspm_supported && in aspm_ctx_init()
236 rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt; in aspm_ctx_init()
249 if (rcd) in aspm_init()
250 aspm_ctx_init(rcd); in aspm_init()
[all …]
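
aspm_enable_all() and aspm_disable_all() show the standard walk over every context: look up each index, act only if the slot is live, and drop the reference before moving on. Reduced to a sketch (the bound first_dyn_alloc_ctxt is taken from the aspm_ctx_init() match; the wrapper name is invented):

    /* Sketch: apply 'fn' to each statically allocated receive context,
     * holding a reference across the call. */
    static void for_each_rcd(struct hfi1_devdata *dd,
                             void (*fn)(struct hfi1_ctxtdata *rcd))
    {
            u16 i;

            for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
                    struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, i);

                    if (rcd) {
                            fn(rcd);
                            hfi1_rcd_put(rcd);
                    }
            }
    }
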
driver.c
379 packet->rcd = rcd; in init_packet()
510 struct hfi1_ctxtdata *rcd = packet->rcd; in init_ps_mdata() local
512 mdata->rcd = rcd; in init_ps_mdata()
578 struct hfi1_ctxtdata *rcd = packet->rcd; in __prescan_rxq() local
644 struct hfi1_ctxtdata *rcd = packet->rcd; in process_rcv_qp_work() local
948 rcd->do_interrupt = rcd->slow_handler; in set_all_slowpath()
1540 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_setup_bypass_packet() local
1613 struct hfi1_ctxtdata *rcd = packet->rcd; in show_eflags_errs() local
1631 struct hfi1_ctxtdata *rcd = packet->rcd; in handle_eflags() local
1633 rcv_hdrerr(rcd, rcd->ppd, packet); in handle_eflags()
[all …]
exp_rcv.c
26 hfi1_exp_tid_set_init(&rcd->tid_used_list); in hfi1_exp_tid_group_init()
36 struct hfi1_devdata *dd = rcd->dd; in hfi1_alloc_ctxt_rcv_groups()
43 rcd->groups = in hfi1_alloc_ctxt_rcv_groups()
45 GFP_KERNEL, rcd->numa_id); in hfi1_alloc_ctxt_rcv_groups()
46 if (!rcd->groups) in hfi1_alloc_ctxt_rcv_groups()
48 tidbase = rcd->expected_base; in hfi1_alloc_ctxt_rcv_groups()
50 grp = &rcd->groups[i]; in hfi1_alloc_ctxt_rcv_groups()
73 kfree(rcd->groups); in hfi1_free_ctxt_rcv_groups()
74 rcd->groups = NULL; in hfi1_free_ctxt_rcv_groups()
75 hfi1_exp_tid_group_init(rcd); in hfi1_free_ctxt_rcv_groups()
[all …]
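
hfi1_alloc_ctxt_rcv_groups() allocates the context's TID group array on its own NUMA node (rcd->numa_id) and seeds each group with a base index starting at rcd->expected_base. A sketch consistent with the visible lines; the per-group entry count, the list helper, and the list-head name below are assumptions added for illustration:

    /* Sketch: NUMA-local group array; each group is tagged with the base
     * of its TID range and queued on the context's free list. */
    rcd->groups = kcalloc_node(ngroups, sizeof(*rcd->groups),
                               GFP_KERNEL, rcd->numa_id);
    if (!rcd->groups)
            return -ENOMEM;
    tidbase = rcd->expected_base;
    for (i = 0; i < ngroups; i++) {
            grp = &rcd->groups[i];
            grp->base = tidbase;                   /* first entry of this group */
            tidbase += group_size;                 /* assumed per-group size */
            tid_group_add_tail(grp, &rcd->tid_group_list); /* assumed helper */
    }
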
msix.c
131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread, in msix_request_rcd_irq_common()
132 rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT, in msix_request_rcd_irq_common()
141 rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; in msix_request_rcd_irq_common()
142 rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); in msix_request_rcd_irq_common()
143 rcd->msix_intr = nr; in msix_request_rcd_irq_common()
144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); in msix_request_rcd_irq_common()
159 rcd->dd->unit, rcd->ctxt); in msix_request_rcd_irq()
175 rcd->dd->unit, rcd->ctxt); in msix_netdev_request_rcd_irq()
270 if (rcd) in msix_request_irqs()
271 ret = msix_request_rcd_irq(rcd); in msix_request_irqs()
[all …]
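
The two lines computing rcd->ireg and rcd->imask in msix_request_rcd_irq_common() are a word/bit split: interrupt sources are numbered linearly, but the hardware exposes them as an array of 64-bit registers, so source n lives in register n / 64 at bit n % 64. As a standalone helper (the function name is hypothetical), called with src = IS_RCVAVAIL_START + rcd->ctxt:

    /* Map a linear interrupt-source number onto the index of its 64-bit
     * CSR and the bit mask within that CSR. */
    static inline void src_to_reg_mask(unsigned int src,
                                       unsigned int *reg, u64 *mask)
    {
            *reg  = src / 64;             /* which 64-bit register */
            *mask = (u64)1 << (src % 64); /* bit inside that register */
    }
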
hfi.h
1467 return rcd->head; in hfi1_rcd_head()
1477 rcd->head = head; in hfi1_set_rcd_head()
1483 return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; in get_rhf_addr()
1513 return rcd->seq_cnt; in hfi1_seq_cnt()
1524 rcd->seq_cnt = cnt; in hfi1_set_seq_cnt()
1548 rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); in hfi1_seq_incr()
1576 return rcd->do_interrupt == rcd->slow_handler; in hfi1_is_slowpath()
1588 return rcd->do_interrupt == rcd->fast_handler; in hfi1_is_fastpath()
1597 if (unlikely(!rcd)) in hfi1_set_fast()
1600 rcd->do_interrupt = rcd->fast_handler; in hfi1_set_fast()
[all …]
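
hfi.h keeps the receive interrupt path switchable at runtime: rcd->do_interrupt points at either rcd->slow_handler or rcd->fast_handler, and the hfi1_is_slowpath()/hfi1_is_fastpath() predicates simply compare pointers. The dispatch idiom, trimmed from the matches:

    /* do_interrupt is the live dispatch pointer; flipping it between the
     * two stored handlers switches paths with no extra state or flags. */
    static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
    {
            return rcd->do_interrupt == rcd->fast_handler;
    }

    static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
    {
            if (unlikely(!rcd))
                    return;
            rcd->do_interrupt = rcd->fast_handler;
    }
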
tid_rdma.c
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
750 write_uctxt_csr(rcd->dd, rcd->ctxt, in kern_set_hw_flow()
809 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_setup_hw_flow()
837 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_clear_hw_flow()
1466 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_setup() local
1530 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_setup()
1557 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_clear() local
1574 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_clear()
1670 req->rcd = qpriv->rcd; in hfi1_init_trdma_req()
3468 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_tid_write_alloc_resources() local
[all …]
netdev_rx.c
209 hfi1_rcd_get(rxq->rcd); in hfi1_netdev_rxq_init()
213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
232 if (rxq->rcd) { in hfi1_netdev_rxq_init()
234 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_init()
235 rxq->rcd = NULL; in hfi1_netdev_rxq_init()
254 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_deinit()
255 rxq->rcd = NULL; in hfi1_netdev_rxq_deinit()
271 rxq->rcd->ctxt); in enable_queues()
275 rxq->rcd); in enable_queues()
289 rxq->rcd->ctxt); in disable_queues()
[all …]
intr.c
202 void handle_user_interrupt(struct hfi1_ctxtdata *rcd) in handle_user_interrupt() argument
204 struct hfi1_devdata *dd = rcd->dd; in handle_user_interrupt()
208 if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) in handle_user_interrupt()
211 if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) { in handle_user_interrupt()
212 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()
213 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd); in handle_user_interrupt()
215 &rcd->event_flags)) { in handle_user_interrupt()
216 rcd->urgent++; in handle_user_interrupt()
217 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()
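
handle_user_interrupt() is the producer half of an event-flag wakeup idiom: a waiter arms itself by setting a bit in rcd->event_flags, and the interrupt side consumes that bit with test_and_clear_bit() so each arming yields exactly one wakeup, then masks further "receive available" interrupts until the waiter re-arms. The core of the pattern, annotated (the reading of the masking step is inferred from the HFI1_RCVCTRL_INTRAVAIL_DIS name):

    /* Atomically consume the wakeup request; only one path wakes the
     * waiter even if the interrupt fires repeatedly. */
    if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
            wake_up_interruptible(&rcd->wait);
            /* stop further interrupt-available notifications until the
             * waiter arms the bit again */
            hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd);
    }
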
trace_rx.h
27 TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
36 TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
38 __entry->ctxt = packet->rcd->ctxt;
59 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
60 TP_ARGS(dd, rcd),
67 __entry->ctxt = rcd->ctxt;
68 __entry->slow_path = hfi1_is_slowpath(rcd);
69 __entry->dma_rtail = get_dma_rtail_setting(rcd);
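
The trace_rx.h matches are tracepoint definitions: TP_STRUCT__entry declares the fields captured per event, TP_fast_assign fills them on the hot path, and DD_DEV_ENTRY/DD_DEV_ASSIGN are the driver's helpers for stamping events with the device name. A minimal skeleton in the same shape (the event name and printk format are invented; the __get_str(dev) usage assumes DD_DEV_ENTRY records the device name as a string):

    TRACE_EVENT(hfi1_rcd_example,
            TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
            TP_ARGS(dd, rcd),
            TP_STRUCT__entry(DD_DEV_ENTRY(dd)
                             __field(unsigned int, ctxt)
                             __field(bool, slow_path)),
            TP_fast_assign(DD_DEV_ASSIGN(dd);
                           __entry->ctxt = rcd->ctxt;
                           __entry->slow_path = hfi1_is_slowpath(rcd);),
            TP_printk("[%s] ctxt %u slow_path %d",
                      __get_str(dev), __entry->ctxt, __entry->slow_path)
    );
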
exp_rcv.h
150 hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp) in hfi1_tid_group_to_idx() argument
152 return grp - &rcd->groups[0]; in hfi1_tid_group_to_idx()
161 hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx) in hfi1_idx_to_tid_group() argument
163 return &rcd->groups[idx]; in hfi1_idx_to_tid_group()
166 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
167 void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
168 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
aspm.h
22 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd);
26 static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd) in aspm_ctx_disable() argument
29 if (likely(!rcd->aspm_intr_supported)) in aspm_ctx_disable()
32 __aspm_ctx_disable(rcd); in aspm_ctx_disable()
debugfs.c
98 struct hfi1_ctxtdata *rcd; in _opcode_stats_seq_show() local
101 rcd = hfi1_rcd_get_by_index(dd, j); in _opcode_stats_seq_show()
102 if (rcd) { in _opcode_stats_seq_show()
106 hfi1_rcd_put(rcd); in _opcode_stats_seq_show()
189 struct hfi1_ctxtdata *rcd; in _ctx_stats_seq_show() local
200 if (!rcd) in _ctx_stats_seq_show()
206 hfi1_rcd_put(rcd); in _ctx_stats_seq_show()
354 struct hfi1_ctxtdata *rcd; in _rcds_seq_show() local
359 if (rcd) in _rcds_seq_show()
360 seqfile_dump_rcd(s, rcd); in _rcds_seq_show()
[all …]
fault.c
51 struct hfi1_ctxtdata *rcd; in _fault_stats_seq_show() local
54 rcd = hfi1_rcd_get_by_index(dd, j); in _fault_stats_seq_show()
55 if (rcd) { in _fault_stats_seq_show()
56 n_packets += rcd->opstats->stats[i].n_packets; in _fault_stats_seq_show()
57 n_bytes += rcd->opstats->stats[i].n_bytes; in _fault_stats_seq_show()
59 hfi1_rcd_put(rcd); in _fault_stats_seq_show()
322 struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev; in hfi1_dbg_should_fault_rx()
tid_rdma.h
99 struct hfi1_ctxtdata *rcd; member
209 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
236 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
237 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
238 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
255 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
/linux/drivers/infiniband/hw/qib/
qib_file_ops.c
1030 (rcd->piocnt % rcd->subctxt_cnt); in qib_mmapf()
1091 if (rcd->urgent != rcd->urgent_poll) { in qib_poll_urgent()
1093 rcd->urgent_poll = rcd->urgent; in qib_poll_urgent()
1493 if (!rcd || !rcd->cnt) in find_shared_ctxt()
1501 rcd->cnt >= rcd->subctxt_cnt) { in find_shared_ctxt()
1565 struct qib_ctxtdata *rcd = fd->rcd; in do_qib_user_sdma_queue_create() local
1670 rcd->pio_base = rcd->piocnt * uctxt; in qib_do_user_init()
1673 rcd->pio_base = rcd->piocnt * uctxt + in qib_do_user_init()
1693 rcd->ctxt, rcd->piocnt); in qib_do_user_init()
1803 rcd = fd->rcd; in qib_close()
[all …]
qib_init.c
172 if (rcd) { in qib_create_ctxtdata()
179 dd->rcd[ctxt] = rcd; in qib_create_ctxtdata()
182 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), in qib_create_ctxtdata()
208 rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + in qib_create_ctxtdata()
487 struct qib_ctxtdata *rcd = dd->rcd[i]; in enable_chip() local
489 if (rcd) in enable_chip()
674 rcd = dd->rcd[i]; in qib_init()
907 if (!rcd) in qib_free_ctxtdata()
912 rcd->rcvhdrq, rcd->rcvhdrq_phys); in qib_free_ctxtdata()
1592 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); in qib_create_rcvhdrq()
[all …]
qib_tx.c
87 last = rcd->pio_base + rcd->piocnt; in qib_disarm_piobufs_ifneeded()
93 if (rcd->user_event_mask) { in qib_disarm_piobufs_ifneeded()
134 struct qib_ctxtdata *rcd; in find_ctxt() local
140 rcd = dd->rcd[ctxt]; in find_ctxt()
141 if (!rcd || bufn < rcd->pio_base || in find_ctxt()
142 bufn >= rcd->pio_base + rcd->piocnt) in find_ctxt()
457 struct qib_ctxtdata *rcd; in qib_cancel_sends() local
473 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
474 if (rcd && rcd->ppd == ppd) { in qib_cancel_sends()
475 last = rcd->pio_base + rcd->piocnt; in qib_cancel_sends()
[all …]
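
find_ctxt() in qib_tx.c maps a PIO buffer number back to its owning context with a half-open range test: context rcd owns buffers [pio_base, pio_base + piocnt). Pulled out as a predicate (the function name is hypothetical):

    /* True iff PIO buffer 'bufn' falls inside this context's slice. */
    static bool ctxt_owns_piobuf(const struct qib_ctxtdata *rcd, u32 bufn)
    {
            return rcd && bufn >= rcd->pio_base &&
                   bufn < rcd->pio_base + rcd->piocnt;
    }
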
qib_driver.c
285 return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift); in qib_get_egrbuf()
406 &rcd->qp_wait_list); in qib_rcv_hdrerr()
456 l = rcd->head; in qib_kreceive()
461 if (seq != rcd->seq_cnt) in qib_kreceive()
509 crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, in qib_kreceive()
529 if (++rcd->seq_cnt > 13) in qib_kreceive()
530 rcd->seq_cnt = 1; in qib_kreceive()
531 if (seq != rcd->seq_cnt) in qib_kreceive()
548 rcd->head = l; in qib_kreceive()
763 if (dd->rcd) in qib_reset_device()
[all …]
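
qib_kreceive() detects new header-queue entries without reading a DMA'd tail pointer: each entry carries a small sequence number, the software counter in rcd->seq_cnt cycles through 1..13, and a mismatch means the entry has not been written yet. Since qib_create_rcvhdrq() zeroes the queue (the memset match above), the unused value 0 guarantees a fresh entry never matches. The wrap step, isolated (the helper name is invented):

    /* Advance the expected sequence value through 1..13, skipping 0 so a
     * zero-filled, not-yet-written entry can never compare equal. */
    static inline u32 qib_seq_incr(u32 seq)
    {
            return (seq >= 13) ? 1 : seq + 1;
    }
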
qib_intr.c
191 struct qib_ctxtdata *rcd; in qib_handle_urcv() local
196 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) { in qib_handle_urcv()
199 rcd = dd->rcd[i]; in qib_handle_urcv()
200 if (!rcd || !rcd->cnt) in qib_handle_urcv()
203 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) { in qib_handle_urcv()
204 wake_up_interruptible(&rcd->wait); in qib_handle_urcv()
205 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS, in qib_handle_urcv()
206 rcd->ctxt); in qib_handle_urcv()
208 &rcd->flag)) { in qib_handle_urcv()
209 rcd->urgent++; in qib_handle_urcv()
[all …]
qib_debugfs.c
103 if (!dd->rcd[j]) in _opcode_stats_seq_show()
105 n_packets += dd->rcd[j]->opstats->stats[i].n_packets; in _opcode_stats_seq_show()
106 n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes; in _opcode_stats_seq_show()
166 if (!dd->rcd[i]) in _ctx_stats_seq_show()
169 for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++) in _ctx_stats_seq_show()
170 n_packets += dd->rcd[i]->opstats->stats[j].n_packets; in _ctx_stats_seq_show()
/linux/arch/x86/kernel/cpu/mce/
apei.c
135 struct cper_mce_record rcd; in apei_write_mce() local
137 memset(&rcd, 0, sizeof(rcd)); in apei_write_mce()
141 rcd.hdr.section_count = 1; in apei_write_mce()
144 rcd.hdr.validation_bits = 0; in apei_write_mce()
145 rcd.hdr.record_length = sizeof(rcd); in apei_write_mce()
151 rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; in apei_write_mce()
152 rcd.sec_hdr.section_length = sizeof(rcd.mce); in apei_write_mce()
162 return erst_write(&rcd.hdr); in apei_write_mce()
167 struct cper_mce_record rcd; in apei_read_mce() local
180 rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd)); in apei_read_mce()
[all …]
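
apei_write_mce() assembles a CPER record by hand; its section_offset line computes where the embedded MCE payload sits inside the record by raw pointer subtraction, which is offsetof() spelled longhand. Equivalently (a rewrite for illustration, not the kernel's wording):

    /* (void *)&rcd.mce - (void *)&rcd == offsetof(struct cper_mce_record, mce) */
    rcd.sec_hdr.section_offset = offsetof(struct cper_mce_record, mce);
    rcd.sec_hdr.section_length = sizeof(rcd.mce);
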
/linux/drivers/acpi/apei/
erst.c
977 struct cper_pstore_record *rcd; in erst_reader() local
984 if (!rcd) { in erst_reader()
1015 memcpy(record->buf, rcd->data, len - sizeof(*rcd)); in erst_reader()
1036 kfree(rcd); in erst_reader()
1046 memset(rcd, 0, sizeof(*rcd)); in erst_writer()
1050 rcd->hdr.section_count = 1; in erst_writer()
1055 rcd->hdr.record_length = sizeof(*rcd) + record->size; in erst_writer()
1061 rcd->sec_hdr.section_offset = sizeof(*rcd); in erst_writer()
1065 rcd->sec_hdr.validation_bits = 0; in erst_writer()
1082 ret = erst_write(&rcd->hdr); in erst_writer()
[all …]
/linux/drivers/net/vmxnet3/
vmxnet3_drv.c
305 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) argument
1231 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1240 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1265 if (!rcd->fcs) in vmxnet3_rx_error()
1381 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && in vmxnet3_rq_rx_complete()
1383 idx = rcd->rxdIdx; in vmxnet3_rq_rx_complete()
1393 if (unlikely(rcd->eop && rcd->err)) { in vmxnet3_rq_rx_complete()
1411 BUG_ON(!(rcd->sop && rcd->eop)); in vmxnet3_rq_rx_complete()
1532 if (rcd->len) { in vmxnet3_rq_rx_complete()
1576 if (rcd->eop) { in vmxnet3_rq_rx_complete()
[all …]
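
Note the identifier collision: in vmxnet3, rcd is an Rx completion descriptor, not a receive context. The matches outline the completion loop's invariants: rcd->rxdIdx names the ring slot being completed, rcd->sop/rcd->eop bracket a possibly multi-buffer frame, and rcd->rqID must belong to the queue (a third accepted ring id is elided in the excerpt). In outline, under those assumptions:

    /* Per-completion sanity checks, shaped after the matches above. */
    idx = rcd->rxdIdx;                        /* ring slot this completes */
    BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
    if (unlikely(rcd->eop && rcd->err))
            goto drop;                        /* error path: vmxnet3_rx_error() */
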
/linux/drivers/cpufreq/
sa1110-cpufreq.c
130 static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) in set_mdcas() argument
134 rcd = 2 * rcd - 1; in set_mdcas()
135 shift = delayed + 1 + rcd; in set_mdcas()
137 mdcas[0] = (1 << rcd) - 1; in set_mdcas()
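
set_mdcas() turns an SDRAM tRCD value into the SA-1110's MDCAS waveform words: rcd is rescaled to half-cycles (2 * rcd - 1), the low (1 << rcd) - 1 bits hold CAS through the RAS-to-CAS delay, and the clock pattern resumes shifted by delayed + 1 + rcd. A reconstruction of the whole helper consistent with the two matched lines; the 0x55555555 alternating pattern filling the elided lines is an assumption:

    static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
    {
            u_int shift;

            rcd = 2 * rcd - 1;               /* tRCD in half-cycles */
            shift = delayed + 1 + rcd;

            mdcas[0]  = (1 << rcd) - 1;      /* hold CAS through tRCD */
            mdcas[0] |= 0x55555555 << shift; /* assumed: alternating clock */
            mdcas[1]  = mdcas[2] = 0x55555555 << (shift & 1); /* assumed */
    }
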
