/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */
#pragma pack(push, 1)

/*
 * Types of GPADL, which decide how the GPADL header is created.
 *
 * There is little difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the header of each ring
 * buffer occupies PAGE_SIZE, but only its first HV_HYP_PAGE is put into
 * the gpadl. The number of HV_HYP_PAGEs and the index of each HV_HYP_PAGE
 * therefore differ between the two types of GPADL. For example, if
 * PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |    ...   |    ...
 *         v    V          V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |   data   |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1  n+16   ...   2n
 *         |        /    /          /           |       /           /
 *         |       /    /          /            |      /           /
 *         |      /    /    ...   /    ...      |     /    ...    /
 *         |     /    /          /              |    /           /
 *         |    /    /          /               |   /           /
 *         V   V    V          V                V  V           v
 * gpadl:  | 4k | 4k |   ...   |    ...    | 4k | 4k |   ...   |
 * index:  0    1    2   ...   16   ...   n-15 n-14 n-13  ... 2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};
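
/*
 * Illustrative sketch (not part of this header) of how the two GPADL types
 * size out when PAGE_SIZE >= HV_HYP_PAGE_SIZE, consistent with the diagram
 * above; the in-tree logic lives in the GPADL setup code.
 */
static inline u32 example_gpadl_size(enum hv_gpadl_type type, u32 size)
{
	if (type == HV_GPADL_BUFFER)
		return size;	/* every HV_HYP_PAGE of the buffer is mapped */

	/*
	 * RING: the two ring buffers are processed as a unit, and only the
	 * first HV_HYP_PAGE of each PAGE_SIZE header goes into the gpadl,
	 * so a (PAGE_SIZE - HV_HYP_PAGE_SIZE) hole is skipped per ring.
	 */
	return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
}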

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers. If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
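
/*
 * Illustrative guest-side use of the feature bit (a sketch, not kernel
 * code): before waiting on a full guest->host ring, the guest advertises
 * how much space it needs so the host can interrupt it once that much
 * space has been freed.
 */
static inline void example_advertise_pending_send(struct hv_ring_buffer *rb,
						  u32 needed)
{
	if (rb->feature_bits.feat_pending_send_sz)
		rb->pending_send_sz = needed;
}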

/* Calculate the proper size of a ring buffer; it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
					       (payload_sz))
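
/*
 * Illustrative only: a ring sized for ~16 KiB of payload. VMBUS_RING_SIZE()
 * adds the shared hv_ring_buffer header and rounds up to whole pages.
 */
#define EXAMPLE_RING_SIZE	VMBUS_RING_SIZE(16 * 1024)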

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
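
/*
 * A note on the arithmetic above: (x << 3) + (x << 1) is x * 10 without a
 * multiply, so the function computes
 *
 *	avail_write * 10 / (ring_size / 10) ~= avail_write * 100 / ring_size
 *
 * i.e. the free space as a percentage of the ring size, using the
 * precomputed reciprocal to avoid a runtime division.
 */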

/*
 * The VMBus version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
#define VERSION_WIN10_V5_3	((5 << 16) | (3))
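
/*
 * Illustrative helpers (not part of this header) for splitting a
 * negotiated version back into its two halves:
 */
static inline u16 example_vmbus_major(u32 version) { return version >> 16; }
static inline u16 example_vmbus_minor(u32 version) { return version & 0xffff; }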

/* The maximum size of a pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close Channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;	/* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8 msg_sint;
			u8 padding1[3];
			u32 padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* StorVSC-specific */
#define VMBUS_RQST_INIT (U64_MAX - 2)
#define VMBUS_RQST_RESET (U64_MAX - 3)
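
/*
 * Illustrative sketch of the constant-time ID scheme (not the kernel
 * implementation; see vmbus_next_request_id() for the real one): the free
 * slots form a list threaded through req_arr itself, so reserving an ID is
 * a pop of the list head. The caller is assumed to hold req_lock.
 */
static inline u64 example_reserve_request_id(struct vmbus_requestor *rqstor,
					     u64 rqst_addr)
{
	u64 id = rqstor->next_request_id;

	if (id >= rqstor->size)				/* no free slot */
		return VMBUS_RQST_ERROR;

	rqstor->next_request_id = rqstor->req_arr[id];	/* pop the free list */
	rqstor->req_arr[id] = rqst_addr;		/* park the address */
	bitmap_set(rqstor->req_bitmap, id, 1);		/* mark slot in use */
	return id;
}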

struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
	bool allowed_in_isolated;
};

#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096

struct vmbus_gpadl {
	u32 gpadl_handle;
	u32 size;
	void *buffer;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	struct vmbus_gpadl ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;		/* Host to Guest interrupts */
	u64 sig_events;		/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callbacks are invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in progress (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all CPUs have
	 * gone through a grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it can potentially process more packets. The monitor latency
	 *    allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * The interrupt delay will delay the guest from emptying the ring
	 * buffer for a specific amount of time. The delay is in microseconds,
	 * between 1 and a maximum of 1000; the default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds, between 1 and 1000, with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* callback to generate a request ID from a request address */
	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
	/* callback to retrieve a request address from a request ID */
	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;

	/* The max size of a packet on this channel */
	u32 max_pkt_size;
};

u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}
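
/*
 * Illustrative only: a latency-sensitive driver might opt out of the
 * default batched reads right after its channel is created.
 */
static inline void example_use_direct_mode(struct vmbus_channel *c)
{
	set_channel_read_mode(c, HV_CALL_DIRECT);
}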

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);
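
/*
 * Illustrative open sequence for a hypothetical driver (the names and the
 * 16 KiB ring sizes are examples, not part of this API):
 */
static inline void example_onchannel_callback(void *context)
{
	struct vmbus_channel *chan = context;

	/* a real driver would drain the inbound ring here */
	(void)chan;
}

static inline int example_open(struct vmbus_channel *channel)
{
	return vmbus_open(channel,
			  VMBUS_RING_SIZE(16 * 1024),	/* send ring */
			  VMBUS_RING_SIZE(16 * 1024),	/* recv ring */
			  NULL, 0,			/* no user data */
			  example_onchannel_callback, channel);
}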

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 struct vmbus_gpadl *gpadl);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				struct vmbus_gpadl *gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * An hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first_raw(struct vmbus_channel *channel);

struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt,
		   bool copy);

void hv_pkt_iter_close(struct vmbus_channel *channel);

static inline struct vmpacket_descriptor *
hv_pkt_iter_next_pkt(struct vmbus_channel *channel,
		     const struct vmpacket_descriptor *pkt,
		     bool copy)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt, copy);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

/*
 * Get next packet descriptor without copying it out of the ring buffer
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next_raw(struct vmbus_channel *channel,
		     const struct vmpacket_descriptor *pkt)
{
	return hv_pkt_iter_next_pkt(channel, pkt, false);
}

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	return hv_pkt_iter_next_pkt(channel, pkt, true);
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
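
/*
 * Illustrative receive loop for a hypothetical channel callback, showing
 * the in-place iterator together with hv_pkt_data()/hv_pkt_datalen():
 */
static inline void example_drain_ring(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, channel) {
		void *data = hv_pkt_data(pkt);
		u32 datalen = hv_pkt_datalen(pkt);

		/* a real driver would dispatch on pkt->type here */
		(void)data;
		(void)datalen;
	}
}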

/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
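
/*
 * Illustrative only: the number of HV_HYP_PAGE-sized frames spanned by a
 * buffer of 'len' bytes starting at 'ptr', combining the helpers above.
 */
static inline u32 example_hvpfn_count(void *ptr, u32 len)
{
	return HVPFN_UP(offset_in_hvpage(ptr) + len);
}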

#endif /* _HYPERV_H */