Lines matching refs: rinfo (per-ring state in the Xen blkfront driver)

Each match below is prefixed with its line number in the source file and suffixed with the enclosing function, where known; tags like "member", "argument", and "local" mark the kind of reference.

218 	struct blkfront_ring_info *rinfo;  member
263 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
268 for ((ptr) = (info)->rinfo, (idx) = 0; \
276 return (void *)info->rinfo + i * info->rinfo_size; in get_rinfo()
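
The rinfo array that negotiate_mq() allocates (lines 1904-1906 below) has variable-size elements: each blkfront_ring_info ends in a flexible shadow[] array sized by the negotiated ring size, so struct_size() computes the per-element size and get_rinfo() (line 276) indexes by plain byte arithmetic. A minimal standalone sketch of that layout, with hypothetical field names:

    #include <stdio.h>
    #include <stdlib.h>

    struct ring_info {
        int id;                               /* stands in for the fixed fields */
        struct { void *request; } shadow[];   /* flexible array member */
    };

    struct dev_info {
        unsigned int nr_rings;
        size_t rinfo_size;        /* per-element size: fixed part + shadow[] */
        struct ring_info *rinfo;  /* nr_rings elements of rinfo_size bytes each */
    };

    static struct ring_info *get_rinfo(const struct dev_info *info, unsigned int i)
    {
        /* Byte arithmetic, as in the driver's get_rinfo(). */
        return (struct ring_info *)((char *)info->rinfo + i * info->rinfo_size);
    }

    int main(void)
    {
        struct dev_info info;
        unsigned int ring_size = 32;   /* BLK_RING_SIZE() in the driver */

        /* struct_size(rinfo, shadow, ring_size), spelled out: */
        info.rinfo_size = sizeof(struct ring_info) +
                          ring_size * sizeof(((struct ring_info *)0)->shadow[0]);
        info.nr_rings = 4;
        info.rinfo = calloc(info.nr_rings, info.rinfo_size);
        if (!info.rinfo)
            return 1;
        for (unsigned int i = 0; i < info.nr_rings; i++)
            get_rinfo(&info, i)->id = (int)i;
        printf("ring 2 -> id %d\n", get_rinfo(&info, 2)->id);
        free(info.rinfo);
        return 0;
    }
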
279 static int get_id_from_freelist(struct blkfront_ring_info *rinfo) in get_id_from_freelist() argument
281 unsigned long free = rinfo->shadow_free; in get_id_from_freelist()
283 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info)); in get_id_from_freelist()
284 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id; in get_id_from_freelist()
285 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ in get_id_from_freelist()
289 static int add_id_to_freelist(struct blkfront_ring_info *rinfo, in add_id_to_freelist() argument
292 if (rinfo->shadow[id].req.u.rw.id != id) in add_id_to_freelist()
294 if (rinfo->shadow[id].request == NULL) in add_id_to_freelist()
296 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free; in add_id_to_freelist()
297 rinfo->shadow[id].request = NULL; in add_id_to_freelist()
298 rinfo->shadow_free = id; in add_id_to_freelist()
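
Free request ids are threaded through the shadow entries themselves: while a slot is free, its request-id field holds the index of the next free slot, so get_id_from_freelist() is a pop and add_id_to_freelist() is a push with double-free checks. A standalone sketch of the technique (names are illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 32

    struct shadow {
        unsigned long id_link;   /* plays the role of req.u.rw.id */
        void *request;           /* NULL while the slot is free */
    };

    static struct shadow shadow[RING_SIZE];
    static unsigned long shadow_free;   /* index of the first free slot */

    static void freelist_init(void)
    {
        /* Same seeding as talk_to_blkback() (lines 1872-1873): slot j
         * links to j + 1, and the last slot gets an out-of-range
         * sentinel so exhaustion trips the allocation check. */
        for (unsigned long j = 0; j < RING_SIZE; j++)
            shadow[j].id_link = j + 1;
        shadow[RING_SIZE - 1].id_link = 0x0fffffff;
        shadow_free = 0;
    }

    static unsigned long get_id(void *req)
    {
        unsigned long free = shadow_free;

        assert(free < RING_SIZE);             /* the BUG_ON() in the driver */
        shadow_free = shadow[free].id_link;   /* follow the thread */
        shadow[free].id_link = free;          /* driver: poison, then store id */
        shadow[free].request = req;
        return free;
    }

    static int add_id(unsigned long id)
    {
        if (shadow[id].id_link != id || shadow[id].request == NULL)
            return -1;                        /* stale or double free */
        shadow[id].id_link = shadow_free;     /* push back onto the free list */
        shadow[id].request = NULL;
        shadow_free = id;
        return 0;
    }

    int main(void)
    {
        int rq;

        freelist_init();
        unsigned long id = get_id(&rq);
        printf("got id %lu, free ok: %d\n", id, add_id(id) == 0);
        return 0;
    }
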
302 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) in fill_grant_buffer() argument
304 struct blkfront_info *info = rinfo->dev_info; in fill_grant_buffer()
324 list_add(&gnt_list_entry->node, &rinfo->grants); in fill_grant_buffer()
332 &rinfo->grants, node) { in fill_grant_buffer()
343 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo) in get_free_grant() argument
347 BUG_ON(list_empty(&rinfo->grants)); in get_free_grant()
348 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant, in get_free_grant()
353 rinfo->persistent_gnts_c--; in get_free_grant()
369 struct blkfront_ring_info *rinfo) in get_grant() argument
371 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_grant()
372 struct blkfront_info *info = rinfo->dev_info; in get_grant()
393 struct blkfront_ring_info *rinfo) in get_indirect_grant() argument
395 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_indirect_grant()
396 struct blkfront_info *info = rinfo->dev_info; in get_indirect_grant()
408 BUG_ON(list_empty(&rinfo->indirect_pages)); in get_indirect_grant()
409 indirect_page = list_first_entry(&rinfo->indirect_pages, in get_indirect_grant()
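
The grant getters draw from a per-ring cache: get_free_grant() pops rinfo->grants, and an entry that still holds a valid grant reference is a persistent grant being reused, hence the persistent_gnts_c accounting. A hedged sketch with a plain singly linked list in place of the kernel's list_head:

    #include <stddef.h>

    #define GREF_INVALID (-1)

    struct grant {
        int gref;                /* grant reference; GREF_INVALID if unset */
        struct grant *next;
    };

    struct grant_cache {
        struct grant *head;           /* rinfo->grants */
        unsigned int persistent_cnt;  /* rinfo->persistent_gnts_c */
    };

    /* Pop the first cached entry; the driver BUG()s if the list is
     * empty, because fill_grant_buffer() pre-populated it. An entry
     * that still carries a valid reference is a persistent grant being
     * reused, so it costs no new grant operation; otherwise the caller
     * grants the page afresh, as get_grant()/get_indirect_grant() do. */
    static struct grant *get_free_grant(struct grant_cache *c)
    {
        struct grant *g = c->head;

        c->head = g->next;
        if (g->gref != GREF_INVALID)
            c->persistent_cnt--;
        return g;
    }

    int main(void)
    {
        struct grant a = { 7, NULL };        /* cached persistent grant */
        struct grant_cache c = { &a, 1 };

        return !(get_free_grant(&c) == &a && c.persistent_cnt == 0);
    }
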
485 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg; in blkif_restart_queue_callback() local
486 schedule_work(&rinfo->work); in blkif_restart_queue_callback()
525 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, in blkif_ring_get_request() argument
531 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt); in blkif_ring_get_request()
532 rinfo->ring.req_prod_pvt++; in blkif_ring_get_request()
534 id = get_id_from_freelist(rinfo); in blkif_ring_get_request()
535 rinfo->shadow[id].request = req; in blkif_ring_get_request()
536 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_ring_get_request()
537 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID; in blkif_ring_get_request()
539 rinfo->shadow[id].req.u.rw.id = id; in blkif_ring_get_request()
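
blkif_ring_get_request() claims a slot at req_prod_pvt, the producer index that stays private until flush_requests() pushes it, and binds the slot to a shadow entry. Note from lines 551-552 and 743-744 that the request is assembled in the shadow's private copy and only copied into the shared slot once complete, so the backend never observes a half-built request. A simplified sketch (power-of-two ring assumed, id supplied by the free list above):

    #define RING_SIZE 32   /* power of two, as for Xen rings */

    enum rq_status { REQ_FREE, REQ_PROCESSING, REQ_WAITING };

    struct ring_req { unsigned long id; /* ... request payload ... */ };

    struct front_ring {
        unsigned int req_prod_pvt;   /* private producer index, not yet shared */
        struct ring_req *ring;       /* the shared ring entries */
    };

    struct shadow_entry {
        void *request;
        enum rq_status status;
        struct ring_req req;   /* private copy; built here, copied to the
                                * shared slot only when complete */
    };

    static unsigned long ring_get_request(struct front_ring *r,
                                          struct shadow_entry *shadow,
                                          unsigned long id, void *req,
                                          struct ring_req **final_slot)
    {
        /* Claim a slot at the private producer index (RING_GET_REQUEST). */
        *final_slot = &r->ring[r->req_prod_pvt & (RING_SIZE - 1)];
        r->req_prod_pvt++;

        shadow[id].request = req;
        shadow[id].status = REQ_PROCESSING;   /* flipped to REQ_WAITING once
                                               * the request is fully built */
        shadow[id].req.id = id;
        return id;
    }

    int main(void)
    {
        static struct ring_req slots[RING_SIZE];
        static struct shadow_entry shadow[RING_SIZE];
        struct front_ring r = { 0, slots };
        struct ring_req *slot;
        int rq;

        ring_get_request(&r, shadow, 0, &rq, &slot);
        return !(slot == &slots[0] && r.req_prod_pvt == 1);
    }
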
544 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_discard_req() argument
546 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_discard_req()
551 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_discard_req()
552 ring_req = &rinfo->shadow[id].req; in blkif_queue_discard_req()
565 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_discard_req()
573 struct blkfront_ring_info *rinfo; member
596 struct blkfront_ring_info *rinfo = setup->rinfo; in blkif_setup_rw_req_grant() local
603 struct blk_shadow *shadow = &rinfo->shadow[setup->id]; in blkif_setup_rw_req_grant()
621 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo); in blkif_setup_rw_req_grant()
627 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo); in blkif_setup_rw_req_grant()
694 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_rw_req() argument
696 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_rw_req()
705 .rinfo = rinfo, in blkif_queue_rw_req()
727 if (rinfo->persistent_gnts_c < max_grefs) { in blkif_queue_rw_req()
731 max_grefs - rinfo->persistent_gnts_c, in blkif_queue_rw_req()
734 &rinfo->callback, in blkif_queue_rw_req()
736 rinfo, in blkif_queue_rw_req()
737 max_grefs - rinfo->persistent_gnts_c); in blkif_queue_rw_req()
743 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_rw_req()
744 ring_req = &rinfo->shadow[id].req; in blkif_queue_rw_req()
746 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); in blkif_queue_rw_req()
749 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) in blkif_queue_rw_req()
756 rinfo->shadow[id].num_sg = num_sg; in blkif_queue_rw_req()
794 extra_id = blkif_ring_get_request(rinfo, req, in blkif_queue_rw_req()
796 extra_ring_req = &rinfo->shadow[extra_id].req; in blkif_queue_rw_req()
802 rinfo->shadow[extra_id].num_sg = 0; in blkif_queue_rw_req()
807 rinfo->shadow[extra_id].associated_id = id; in blkif_queue_rw_req()
808 rinfo->shadow[id].associated_id = extra_id; in blkif_queue_rw_req()
819 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) { in blkif_queue_rw_req()
841 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_rw_req()
844 rinfo->shadow[extra_id].status = REQ_WAITING; in blkif_queue_rw_req()
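
Before building a read/write request, blkif_queue_rw_req() checks whether the persistent-grant cache covers max_grefs; if reserving the shortfall fails, it registers a free callback (lines 734-737) and backs off, and blkif_restart_queue_callback() (line 485) later schedules the work item that restarts the queue. A toy model of that reserve-or-register-callback pattern (the allocator and all names here are invented for the sketch):

    #include <stddef.h>
    #include <stdio.h>

    static int avail = 4;   /* toy pool of free grant references */

    struct free_callback {
        void (*fn)(void *arg);
        void *arg;
        int count;          /* fire once this many references are free */
    };

    static struct free_callback *pending;

    static int reserve_grants(int n)
    {
        if (avail < n)
            return -1;      /* like gnttab_alloc_grant_references() failing */
        avail -= n;
        return 0;
    }

    /* Completion paths return references here; once enough are free,
     * the registered callback runs (in the driver it schedules the
     * blkif_restart_queue work item, which restarts the hw queue). */
    static void release_grants(int n)
    {
        avail += n;
        if (pending && avail >= pending->count) {
            struct free_callback *cb = pending;

            pending = NULL;
            cb->fn(cb->arg);
        }
    }

    static void restart_queue(void *arg)
    {
        printf("restarting queue for %s\n", (char *)arg);
    }

    int main(void)
    {
        static struct free_callback cb;
        int max_grefs = 6, cached = 1;   /* persistent_gnts_c in the driver */

        if (reserve_grants(max_grefs - cached) < 0) {
            /* Short on grants: register the callback and back off
             * instead of spinning; the queue stays stopped until it fires. */
            cb.fn = restart_queue;
            cb.arg = "ring0";
            cb.count = max_grefs - cached;
            pending = &cb;
        }
        release_grants(3);   /* a completed request frees its grants */
        return 0;
    }
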
859 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_request() argument
861 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) in blkif_queue_request()
866 return blkif_queue_discard_req(req, rinfo); in blkif_queue_request()
868 return blkif_queue_rw_req(req, rinfo); in blkif_queue_request()
871 static inline void flush_requests(struct blkfront_ring_info *rinfo) in flush_requests() argument
875 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify); in flush_requests()
878 notify_remote_via_irq(rinfo->irq); in flush_requests()
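
flush_requests() publishes the privately queued requests and notifies the backend only when needed: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY sets notify exactly when the backend's req_event mark lies within the newly published batch. A compilable approximation (seq_cst atomics stand in for the macro's explicit wmb()/mb() barriers):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sring {
        _Atomic unsigned int req_prod;    /* shared producer index */
        _Atomic unsigned int req_event;   /* backend: "notify me past this" */
    };

    struct front_ring {
        unsigned int req_prod_pvt;        /* requests queued but not pushed */
        struct sring *sring;
    };

    /* Returns true when the event channel must be kicked, i.e. when
     * req_event falls inside the half-open window (old, new] that this
     * push just made visible. */
    static bool push_requests_and_check_notify(struct front_ring *r)
    {
        unsigned int old = atomic_load(&r->sring->req_prod);
        unsigned int new = r->req_prod_pvt;

        atomic_store(&r->sring->req_prod, new);   /* publish the batch */
        return (unsigned int)(new - atomic_load(&r->sring->req_event)) <
               (unsigned int)(new - old);
    }

    int main(void)
    {
        struct sring s = { 0, 1 };          /* fresh ring: notify on first req */
        struct front_ring r = { 3, &s };    /* three requests built privately */

        return !push_requests_and_check_notify(&r);   /* expect "notify" */
    }
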
897 struct blkfront_ring_info *rinfo = NULL; in blkif_queue_rq() local
899 rinfo = get_rinfo(info, qid); in blkif_queue_rq()
901 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_queue_rq()
902 if (RING_FULL(&rinfo->ring)) in blkif_queue_rq()
905 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info)) in blkif_queue_rq()
908 if (blkif_queue_request(qd->rq, rinfo)) in blkif_queue_rq()
911 flush_requests(rinfo); in blkif_queue_rq()
912 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
916 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
921 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
1179 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) in kick_pending_request_queues_locked() argument
1181 if (!RING_FULL(&rinfo->ring)) in kick_pending_request_queues_locked()
1182 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true); in kick_pending_request_queues_locked()
1185 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo) in kick_pending_request_queues() argument
1189 spin_lock_irqsave(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1190 kick_pending_request_queues_locked(rinfo); in kick_pending_request_queues()
1191 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1196 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work); in blkif_restart_queue() local
1198 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED) in blkif_restart_queue()
1199 kick_pending_request_queues(rinfo); in blkif_restart_queue()
1202 static void blkif_free_ring(struct blkfront_ring_info *rinfo) in blkif_free_ring() argument
1205 struct blkfront_info *info = rinfo->dev_info; in blkif_free_ring()
1212 if (!list_empty(&rinfo->indirect_pages)) { in blkif_free_ring()
1216 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkif_free_ring()
1223 if (!list_empty(&rinfo->grants)) { in blkif_free_ring()
1225 &rinfo->grants, node) { in blkif_free_ring()
1230 rinfo->persistent_gnts_c--; in blkif_free_ring()
1237 BUG_ON(rinfo->persistent_gnts_c != 0); in blkif_free_ring()
1244 if (!rinfo->shadow[i].request) in blkif_free_ring()
1247 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free_ring()
1248 rinfo->shadow[i].req.u.indirect.nr_segments : in blkif_free_ring()
1249 rinfo->shadow[i].req.u.rw.nr_segments; in blkif_free_ring()
1251 persistent_gnt = rinfo->shadow[i].grants_used[j]; in blkif_free_ring()
1258 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT) in blkif_free_ring()
1266 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; in blkif_free_ring()
1273 kvfree(rinfo->shadow[i].grants_used); in blkif_free_ring()
1274 rinfo->shadow[i].grants_used = NULL; in blkif_free_ring()
1275 kvfree(rinfo->shadow[i].indirect_grants); in blkif_free_ring()
1276 rinfo->shadow[i].indirect_grants = NULL; in blkif_free_ring()
1277 kvfree(rinfo->shadow[i].sg); in blkif_free_ring()
1278 rinfo->shadow[i].sg = NULL; in blkif_free_ring()
1282 gnttab_cancel_free_callback(&rinfo->callback); in blkif_free_ring()
1285 flush_work(&rinfo->work); in blkif_free_ring()
1289 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { in blkif_free_ring()
1290 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0); in blkif_free_ring()
1291 rinfo->ring_ref[i] = GRANT_INVALID_REF; in blkif_free_ring()
1294 free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); in blkif_free_ring()
1295 rinfo->ring.sring = NULL; in blkif_free_ring()
1297 if (rinfo->irq) in blkif_free_ring()
1298 unbind_from_irqhandler(rinfo->irq, rinfo); in blkif_free_ring()
1299 rinfo->evtchn = rinfo->irq = 0; in blkif_free_ring()
1305 struct blkfront_ring_info *rinfo; in blkif_free() local
1314 for_each_rinfo(info, rinfo, i) in blkif_free()
1315 blkif_free_ring(rinfo); in blkif_free()
1317 kvfree(info->rinfo); in blkif_free()
1318 info->rinfo = NULL; in blkif_free()
1379 struct blkfront_ring_info *rinfo, in blkif_completion() argument
1385 struct blkfront_info *info = rinfo->dev_info; in blkif_completion()
1386 struct blk_shadow *s = &rinfo->shadow[*id]; in blkif_completion()
1396 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id]; in blkif_completion()
1428 if (add_id_to_freelist(rinfo, s->associated_id)) in blkif_completion()
1464 list_add(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1465 rinfo->persistent_gnts_c++; in blkif_completion()
1475 list_add_tail(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1484 list_add(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1485 rinfo->persistent_gnts_c++; in blkif_completion()
1496 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkif_completion()
1499 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1513 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; in blkif_interrupt() local
1514 struct blkfront_info *info = rinfo->dev_info; in blkif_interrupt()
1522 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_interrupt()
1524 rp = READ_ONCE(rinfo->ring.sring->rsp_prod); in blkif_interrupt()
1526 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) { in blkif_interrupt()
1528 info->gd->disk_name, rp - rinfo->ring.rsp_cons); in blkif_interrupt()
1532 for (i = rinfo->ring.rsp_cons; i != rp; i++) { in blkif_interrupt()
1538 RING_COPY_RESPONSE(&rinfo->ring, i, &bret); in blkif_interrupt()
1551 if (rinfo->shadow[id].status != REQ_WAITING) { in blkif_interrupt()
1557 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_interrupt()
1558 req = rinfo->shadow[id].request; in blkif_interrupt()
1560 op = rinfo->shadow[id].req.operation; in blkif_interrupt()
1562 op = rinfo->shadow[id].req.u.indirect.indirect_op; in blkif_interrupt()
1574 if (!blkif_completion(&id, rinfo, &bret)) in blkif_interrupt()
1578 if (add_id_to_freelist(rinfo, id)) { in blkif_interrupt()
1611 rinfo->shadow[id].req.u.rw.nr_segments == 0)) { in blkif_interrupt()
1640 rinfo->ring.rsp_cons = i; in blkif_interrupt()
1642 if (i != rinfo->ring.req_prod_pvt) { in blkif_interrupt()
1644 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do); in blkif_interrupt()
1648 rinfo->ring.sring->rsp_event = i + 1; in blkif_interrupt()
1650 kick_pending_request_queues_locked(rinfo); in blkif_interrupt()
1652 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
1661 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
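
blkif_interrupt() is the consumer half of the same protocol: snapshot rsp_prod once, guard against a producer index that overran the ring (RING_RESPONSE_PROD_OVERFLOW), copy each response out of the shared page before trusting it, validate the id and shadow status, then re-arm rsp_event. A sketch of the loop's skeleton (validation elided):

    #include <stdatomic.h>

    #define RING_SIZE 32

    struct rsp { unsigned long id; int status; };

    struct sring {
        _Atomic unsigned int rsp_prod;    /* written by the backend */
        _Atomic unsigned int rsp_event;   /* "interrupt me past this index" */
        struct rsp ring[RING_SIZE];
    };

    struct front_ring {
        unsigned int rsp_cons;            /* private consumer index */
        struct sring *s;
    };

    static void consume_responses(struct front_ring *r)
    {
        /* Snapshot the producer index exactly once per pass (the driver
         * uses READ_ONCE() plus a read barrier; atomic_load stands in). */
        unsigned int rp = atomic_load(&r->s->rsp_prod);

        for (unsigned int i = r->rsp_cons; i != rp; i++) {
            /* Copy the response out of the shared page before looking
             * at it, as RING_COPY_RESPONSE() does, so a misbehaving
             * backend cannot change it between validation and use. */
            struct rsp bret = r->s->ring[i & (RING_SIZE - 1)];

            (void)bret;  /* ...validate bret.id against the shadow, check
                          * the shadow status, complete the request... */
        }
        r->rsp_cons = rp;
        /* Re-arm: interrupt again once anything past rp is produced.
         * The driver then re-checks for responses that raced with this
         * store (RING_FINAL_CHECK_FOR_RESPONSES), omitted here. */
        atomic_store(&r->s->rsp_event, rp + 1);
    }

    int main(void)
    {
        static struct sring s;
        struct front_ring r = { 0, &s };

        atomic_store(&s.rsp_prod, 2);   /* backend produced two responses */
        consume_responses(&r);
        return !(r.rsp_cons == 2 && atomic_load(&s.rsp_event) == 3);
    }
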
1671 struct blkfront_ring_info *rinfo) in setup_blkring() argument
1675 struct blkfront_info *info = rinfo->dev_info; in setup_blkring()
1680 rinfo->ring_ref[i] = GRANT_INVALID_REF; in setup_blkring()
1689 FRONT_RING_INIT(&rinfo->ring, sring, ring_size); in setup_blkring()
1691 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); in setup_blkring()
1694 rinfo->ring.sring = NULL; in setup_blkring()
1698 rinfo->ring_ref[i] = gref[i]; in setup_blkring()
1700 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn); in setup_blkring()
1704 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, in setup_blkring()
1705 0, "blkif", rinfo); in setup_blkring()
1711 rinfo->irq = err; in setup_blkring()
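
setup_blkring() allocates nr_ring_pages pages for the shared ring, grants them to the backend, and sizes the ring with FRONT_RING_INIT(): the entry count is the usable bytes after the ring header divided by the entry size, rounded down to a power of two (what BLK_RING_SIZE() evaluates to). A sketch of that sizing arithmetic; the header and entry sizes below are illustrative, not blkif's real ones:

    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u

    static unsigned int rounddown_pow2(unsigned int x)
    {
        unsigned int p = 1;

        while (p * 2 <= x)
            p *= 2;
        return p;
    }

    int main(void)
    {
        unsigned int hdr = 64;      /* illustrative ring header size */
        unsigned int entry = 112;   /* illustrative request/response union size */

        for (unsigned int pages = 1; pages <= 16; pages *= 2)
            printf("%2u page(s): %u entries\n", pages,
                   rounddown_pow2((pages * XEN_PAGE_SIZE - hdr) / entry));
        return 0;
    }
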
1724 struct blkfront_ring_info *rinfo, const char *dir) in write_per_ring_nodes() argument
1729 struct blkfront_info *info = rinfo->dev_info; in write_per_ring_nodes()
1732 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]); in write_per_ring_nodes()
1743 "%u", rinfo->ring_ref[i]); in write_per_ring_nodes()
1751 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn); in write_per_ring_nodes()
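
write_per_ring_nodes() records each ring in xenstore: a single-page ring under one key, a multi-page ring under one numbered key per page (line 1743; the key format is elided in the listing, but the blkif protocol uses "ring-ref%u"), plus the ring's event channel. A sketch of the keys being emitted:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ring_ref[2] = { 8, 9 };   /* illustrative grant refs */
        unsigned int evtchn = 42, nr_pages = 2;
        char key[16];

        if (nr_pages == 1) {
            printf("ring-ref = %u\n", ring_ref[0]);
        } else {
            for (unsigned int i = 0; i < nr_pages; i++) {
                snprintf(key, sizeof(key), "ring-ref%u", i);
                printf("%s = %u\n", key, ring_ref[i]);
            }
        }
        printf("event-channel = %u\n", evtchn);
        return 0;
    }
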
1776 struct blkfront_ring_info *rinfo; in talk_to_blkback() local
1790 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1792 err = setup_blkring(dev, rinfo); in talk_to_blkback()
1815 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename); in talk_to_blkback()
1837 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1840 err = write_per_ring_nodes(xbt, rinfo, path); in talk_to_blkback()
1868 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1872 rinfo->shadow[j].req.u.rw.id = j + 1; in talk_to_blkback()
1873 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; in talk_to_blkback()
1892 struct blkfront_ring_info *rinfo; in negotiate_mq() local
1904 info->rinfo_size = struct_size(info->rinfo, shadow, in negotiate_mq()
1906 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); in negotiate_mq()
1907 if (!info->rinfo) { in negotiate_mq()
1913 for_each_rinfo(info, rinfo, i) { in negotiate_mq()
1914 INIT_LIST_HEAD(&rinfo->indirect_pages); in negotiate_mq()
1915 INIT_LIST_HEAD(&rinfo->grants); in negotiate_mq()
1916 rinfo->dev_info = info; in negotiate_mq()
1917 INIT_WORK(&rinfo->work, blkif_restart_queue); in negotiate_mq()
1918 spin_lock_init(&rinfo->ring_lock); in negotiate_mq()
2015 struct blkfront_ring_info *rinfo; in blkif_recover() local
2023 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2024 rc = blkfront_setup_indirect(rinfo); in blkif_recover()
2033 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2035 kick_pending_request_queues(rinfo); in blkif_recover()
2066 struct blkfront_ring_info *rinfo; in blkfront_resume() local
2072 for_each_rinfo(info, rinfo, i) { in blkfront_resume()
2074 struct blk_shadow *shadow = rinfo->shadow; in blkfront_resume()
2124 struct blkfront_ring_info *rinfo; in blkfront_closing() local
2135 for_each_rinfo(info, rinfo, i) { in blkfront_closing()
2137 gnttab_cancel_free_callback(&rinfo->callback); in blkfront_closing()
2140 flush_work(&rinfo->work); in blkfront_closing()
2159 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) in blkfront_setup_indirect() argument
2163 struct blkfront_info *info = rinfo->dev_info; in blkfront_setup_indirect()
2183 err = fill_grant_buffer(rinfo, in blkfront_setup_indirect()
2196 BUG_ON(!list_empty(&rinfo->indirect_pages)); in blkfront_setup_indirect()
2201 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkfront_setup_indirect()
2206 rinfo->shadow[i].grants_used = in blkfront_setup_indirect()
2208 sizeof(rinfo->shadow[i].grants_used[0]), in blkfront_setup_indirect()
2210 rinfo->shadow[i].sg = kvcalloc(psegs, in blkfront_setup_indirect()
2211 sizeof(rinfo->shadow[i].sg[0]), in blkfront_setup_indirect()
2214 rinfo->shadow[i].indirect_grants = in blkfront_setup_indirect()
2216 sizeof(rinfo->shadow[i].indirect_grants[0]), in blkfront_setup_indirect()
2218 if ((rinfo->shadow[i].grants_used == NULL) || in blkfront_setup_indirect()
2219 (rinfo->shadow[i].sg == NULL) || in blkfront_setup_indirect()
2221 (rinfo->shadow[i].indirect_grants == NULL))) in blkfront_setup_indirect()
2223 sg_init_table(rinfo->shadow[i].sg, psegs); in blkfront_setup_indirect()
2232 kvfree(rinfo->shadow[i].grants_used); in blkfront_setup_indirect()
2233 rinfo->shadow[i].grants_used = NULL; in blkfront_setup_indirect()
2234 kvfree(rinfo->shadow[i].sg); in blkfront_setup_indirect()
2235 rinfo->shadow[i].sg = NULL; in blkfront_setup_indirect()
2236 kvfree(rinfo->shadow[i].indirect_grants); in blkfront_setup_indirect()
2237 rinfo->shadow[i].indirect_grants = NULL; in blkfront_setup_indirect()
2239 if (!list_empty(&rinfo->indirect_pages)) { in blkfront_setup_indirect()
2241 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkfront_setup_indirect()
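
blkfront_setup_indirect() allocates three arrays per shadow entry and, on any failure, jumps to a cleanup path that frees every entry's arrays and resets the pointers; this is safe because kvfree() ignores NULL and the shadow array starts zeroed. The same unwind pattern in standalone form:

    #include <stdlib.h>

    struct shadow {
        void **grants_used;      /* one grant per segment */
        void **sg;               /* stand-in for the scatterlist array */
        void **indirect_grants;  /* grants holding indirect descriptor pages */
    };

    /* Allocate the per-shadow arrays for n ring entries; on any
     * failure, unwind by freeing everything. free(NULL) is a no-op and
     * the caller hands in a zeroed array, so slots the loop never
     * reached are safe to free too -- the same reasoning lets the
     * driver kvfree() across the whole ring. */
    static int setup(struct shadow *sh, unsigned int n, size_t grefs, size_t psegs)
    {
        for (unsigned int i = 0; i < n; i++) {
            sh[i].grants_used = calloc(grefs, sizeof(void *));
            sh[i].sg = calloc(psegs, sizeof(void *));
            sh[i].indirect_grants = calloc(grefs, sizeof(void *));
            if (!sh[i].grants_used || !sh[i].sg || !sh[i].indirect_grants)
                goto out_of_memory;
        }
        return 0;

    out_of_memory:
        for (unsigned int i = 0; i < n; i++) {
            free(sh[i].grants_used);     sh[i].grants_used = NULL;
            free(sh[i].sg);              sh[i].sg = NULL;
            free(sh[i].indirect_grants); sh[i].indirect_grants = NULL;
        }
        return -1;
    }

    int main(void)
    {
        struct shadow *sh = calloc(8, sizeof(*sh));

        return sh ? setup(sh, 8, 16, 8) : 1;
    }
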
2318 struct blkfront_ring_info *rinfo; in blkfront_connect() local
2373 for_each_rinfo(info, rinfo, i) { in blkfront_connect()
2374 err = blkfront_setup_indirect(rinfo); in blkfront_connect()
2395 for_each_rinfo(info, rinfo, i) in blkfront_connect()
2396 kick_pending_request_queues(rinfo); in blkfront_connect()
2524 struct blkfront_ring_info *rinfo; in purge_persistent_grants() local
2526 for_each_rinfo(info, rinfo, i) { in purge_persistent_grants()
2529 spin_lock_irqsave(&rinfo->ring_lock, flags); in purge_persistent_grants()
2531 if (rinfo->persistent_gnts_c == 0) { in purge_persistent_grants()
2532 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
2536 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, in purge_persistent_grants()
2544 rinfo->persistent_gnts_c--; in purge_persistent_grants()
2546 list_add_tail(&gnt_list_entry->node, &rinfo->grants); in purge_persistent_grants()
2549 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
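
purge_persistent_grants() walks the cache with list_for_each_entry_safe() (safe because the current entry is relinked mid-walk), revokes grants that are cached but unused, and moves those entries to the tail so the still-persistent ones stay at the head where get_free_grant() looks. A sketch of one purge pass; splitting into keep/demoted lists is an equivalent, simpler formulation of the in-place move:

    #include <stddef.h>

    #define GREF_INVALID (-1)

    struct grant { int gref; int in_use; struct grant *next; };

    /* One purge pass: revoke grants that are cached but unused
     * (gnttab_end_foreign_access() in the driver), decrement the
     * persistent count, and append those entries to the tail of the
     * cache while keeping the rest in order at the head. */
    static void purge(struct grant **head, unsigned int *persistent_cnt)
    {
        struct grant *keep = NULL, **ktail = &keep;
        struct grant *demoted = NULL, **dtail = &demoted;
        struct grant *g = *head, *next;

        for (; g; g = next) {
            next = g->next;       /* grab the successor before relinking */
            g->next = NULL;
            if (g->gref != GREF_INVALID && !g->in_use) {
                g->gref = GREF_INVALID;   /* grant revoked */
                (*persistent_cnt)--;
                *dtail = g; dtail = &g->next;
            } else {
                *ktail = g; ktail = &g->next;
            }
        }
        *ktail = demoted;   /* demoted entries go to the tail */
        *head = keep;
    }

    int main(void)
    {
        struct grant a = { 5, 0, NULL }, b = { GREF_INVALID, 0, NULL };
        struct grant *head = &a;
        unsigned int cnt = 1;

        a.next = &b;
        purge(&head, &cnt);
        return !(head == &b && b.next == &a && cnt == 0);
    }
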