mirror of
				https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
				synced 2025-10-31 16:18:51 +01:00 
			
		
		
		
	Fixed MTP to work with TWRP
This commit is contained in:
		
						commit
						f6dfaef42e
					
				
					 50820 changed files with 20846062 additions and 0 deletions
				
			
		
							
								
								
									
										253
									
								
								include/xen/interface/io/blkif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										253
									
								
								include/xen/interface/io/blkif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,253 @@ | |||
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

/* Frontend-relative handle identifying a virtual block device. */
typedef uint16_t blkif_vdev_t;
/* Sector index on the virtual disk. */
typedef uint64_t blkif_sector_t;

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2

/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info.  A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate.  The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes.  If a backend does
 * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3

/*
 * Recognised only if "feature-discard" is present in backend xenbus info.
 * The "feature-discard" node contains a boolean indicating whether trim
 * (ATA) or unmap (SCSI) - conveniently called discard requests are likely
 * to succeed or fail. Either way, a discard request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt discard requests.
 * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
 * create the "feature-discard" node!
 *
 * Discard operation is a request for the underlying block device to mark
 * extents to be erased. However, discard does not guarantee that the blocks
 * will be erased from the device - it is just a hint to the device
 * controller that these blocks are no longer in use. What the device
 * controller does with that information is left to the controller.
 * Discard operations are passed with sector_number as the
 * sector index to begin discard operations at and nr_sectors as the number of
 * sectors to be discarded. The specified sectors should be discarded if the
 * underlying block device supports trim (ATA) or unmap (SCSI) operations,
 * or a BLKIF_RSP_EOPNOTSUPP should be returned.
 * More information about trim/unmap operations at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 * The backend can optionally provide three extra XenBus attributes to
 * further optimize the discard functionality:
 * 'discard-alignment' - Devices that support discard functionality may
 * internally allocate space in units that are bigger than the exported
 * logical block size. The discard-alignment parameter indicates how many bytes
 * the beginning of the partition is offset from the internal allocation unit's
 * natural alignment.
 * 'discard-granularity'  - Devices that support discard functionality may
 * internally allocate space using units that are bigger than the logical block
 * size. The discard-granularity parameter indicates the size of the internal
 * allocation unit in bytes if reported by the device. Otherwise the
 * discard-granularity will be set to match the device's physical block size.
 * 'discard-secure' - All copies of the discarded sectors (potentially created
 * by garbage collection) must also be erased.  To use this feature, the flag
 * BLKIF_DISCARD_SECURE must be set in the blkif_request_trim.
 */
/* NB: opcode 4 is not defined in this header. */
#define BLKIF_OP_DISCARD           5

/*
 * Recognized if "feature-max-indirect-segments" in present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

/* Maximum number of indirect-segment pages per indirect request. */
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
/*
 * One scatter/gather segment: a granted frame plus the inclusive sector
 * sub-range within that frame to transfer.
 */
struct blkif_request_segment {
		grant_ref_t gref;        /* reference to I/O buffer frame        */
		/* @first_sect: first sector in frame to transfer (inclusive).   */
		/* @last_sect: last sector in frame to transfer (inclusive).     */
		uint8_t     first_sect, last_sect;
};
| 
 | ||||
/*
 * Body of a BLKIF_OP_READ / BLKIF_OP_WRITE request.  Packed: this is a
 * wire format shared with the backend, so layout must not change.
 */
struct blkif_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad1;	     /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	/* At most the first nr_segments entries of seg[] are valid. */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
| 
 | ||||
/*
 * Body of a BLKIF_OP_DISCARD request: ask the backend to discard
 * nr_sectors starting at sector_number.  Packed wire format.
 */
struct blkif_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
	blkif_vdev_t   _pad1;        /* padding; overlays rw.handle slot     */
#ifndef CONFIG_X86_32
	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number; /* first sector to discard             */
	uint64_t       nr_sectors;   /* number of sectors to discard         */
	uint8_t        _pad3;
} __attribute__((__packed__));
| 
 | ||||
/*
 * Body for any other/unknown operation: carries only the request id so a
 * response can still be matched.  Pad fields keep 'id' at the same offset
 * as in the rw/discard variants.  Packed wire format.
 */
struct blkif_request_other {
	uint8_t      _pad1;
	blkif_vdev_t _pad2;        /* padding; overlays rw.handle slot     */
#ifndef CONFIG_X86_32
	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
#endif
	uint64_t     id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
| 
 | ||||
/*
 * Body of a BLKIF_OP_INDIRECT request: the segment list lives in separate
 * granted pages (indirect_grefs) rather than inline, allowing more than
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments.  Packed wire format.
 */
struct blkif_request_indirect {
	uint8_t        indirect_op;  /* actual operation being issued indirectly */
	uint16_t       nr_segments;  /* total segments across indirect pages */
#ifndef CONFIG_X86_32
	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	/* Grants for the pages holding arrays of blkif_request_segment. */
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifndef CONFIG_X86_32
	uint32_t      _pad3;         /* make it 64 byte aligned */
#else
	uint64_t      _pad3;         /* make it 64 byte aligned */
#endif
} __attribute__((__packed__));
| 
 | ||||
/*
 * A request on the shared ring.  'operation' (a BLKIF_OP_* code) selects
 * which member of the union carries the request body.  Packed wire format.
 */
struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_request_rw rw;
		struct blkif_request_discard discard;
		struct blkif_request_other other;
		struct blkif_request_indirect indirect;
	} u;
} __attribute__((__packed__));
| 
 | ||||
/* A response on the shared ring, matched to its request via 'id'. */
struct blkif_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
| 
 | ||||
/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

/* Virtual-disk "info" flags. */
#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

/* Xen-defined major numbers for virtual disks, they look strangely
 * familiar */
#define XEN_IDE0_MAJOR	3
#define XEN_IDE1_MAJOR	22
#define XEN_SCSI_DISK0_MAJOR	8
#define XEN_SCSI_DISK1_MAJOR	65
#define XEN_SCSI_DISK2_MAJOR	66
#define XEN_SCSI_DISK3_MAJOR	67
#define XEN_SCSI_DISK4_MAJOR	68
#define XEN_SCSI_DISK5_MAJOR	69
#define XEN_SCSI_DISK6_MAJOR	70
#define XEN_SCSI_DISK7_MAJOR	71
#define XEN_SCSI_DISK8_MAJOR	128
#define XEN_SCSI_DISK9_MAJOR	129
#define XEN_SCSI_DISK10_MAJOR	130
#define XEN_SCSI_DISK11_MAJOR	131
#define XEN_SCSI_DISK12_MAJOR	132
#define XEN_SCSI_DISK13_MAJOR	133
#define XEN_SCSI_DISK14_MAJOR	134
#define XEN_SCSI_DISK15_MAJOR	135

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
							
								
								
									
										23
									
								
								include/xen/interface/io/console.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								include/xen/interface/io/console.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,23 @@ | |||
/******************************************************************************
 * console.h
 *
 * Console I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2005, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
#define __XEN_PUBLIC_IO_CONSOLE_H__

/* Free-running ring index; wrapped onto a slot via MASK_XENCONS_IDX(). */
typedef uint32_t XENCONS_RING_IDX;

/*
 * Map a free-running index onto a slot of @ring.  The mask trick is only
 * correct when sizeof(ring) is a power of two (true for in[] and out[]
 * below).
 */
#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))

/*
 * Shared console page: a 1024-byte input ring and a 2048-byte output
 * ring, each with free-running producer/consumer indices.
 */
struct xencons_interface {
    char in[1024];
    char out[2048];
    XENCONS_RING_IDX in_cons, in_prod;
    XENCONS_RING_IDX out_cons, out_prod;
};

#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
							
								
								
									
										143
									
								
								include/xen/interface/io/fbif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										143
									
								
								include/xen/interface/io/fbif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,143 @@ | |||
/*
 * fbif.h -- Xen virtual frame buffer device
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 */

#ifndef __XEN_PUBLIC_IO_FBIF_H__
#define __XEN_PUBLIC_IO_FBIF_H__

/* Out events (frontend -> backend) */

/*
 * Out events may be sent only when requested by backend, and receipt
 * of an unknown out event is an error.
 */

/* Event type 1 currently not used */
/*
 * Framebuffer update notification event
 * Capable frontend sets feature-update in xenstore.
 * Backend requests it by setting request-update in xenstore.
 */
#define XENFB_TYPE_UPDATE 2
| 
 | ||||
/* XENFB_TYPE_UPDATE event body: the dirty rectangle of the framebuffer. */
struct xenfb_update {
	uint8_t type;		/* XENFB_TYPE_UPDATE */
	int32_t x;		/* source x */
	int32_t y;		/* source y */
	int32_t width;		/* rect width */
	int32_t height;		/* rect height */
};
| 
 | ||||
/*
 * Framebuffer resize notification event
 * Capable backend sets feature-resize in xenstore.
 */
#define XENFB_TYPE_RESIZE 3

/* XENFB_TYPE_RESIZE event body: the framebuffer's new geometry. */
struct xenfb_resize {
	uint8_t type;		/* XENFB_TYPE_RESIZE */
	int32_t width;		/* width in pixels */
	int32_t height;		/* height in pixels */
	int32_t stride;		/* stride in bytes */
	int32_t depth;		/* depth in bits */
	int32_t offset;		/* start offset within framebuffer */
};
| 
 | ||||
/* Fixed on-ring size (bytes) of every out event, regardless of type. */
#define XENFB_OUT_EVENT_SIZE 40

/* Any out event; 'type' discriminates, pad[] fixes the ring slot size. */
union xenfb_out_event {
	uint8_t type;
	struct xenfb_update update;
	struct xenfb_resize resize;
	char pad[XENFB_OUT_EVENT_SIZE];
};
| 
 | ||||
/* In events (backend -> frontend) */

/*
 * Frontends should ignore unknown in events.
 * No in events currently defined.
 */

/* Fixed on-ring size (bytes) of every in event. */
#define XENFB_IN_EVENT_SIZE 40

/* Any in event; only 'type' is meaningful today. */
union xenfb_in_event {
	uint8_t type;
	char pad[XENFB_IN_EVENT_SIZE];
};
| 
 | ||||
/* shared page */

/* In ring: 1024 bytes starting at byte offset 1024 of the shared page. */
#define XENFB_IN_RING_SIZE 1024
#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
#define XENFB_IN_RING_OFFS 1024
#define XENFB_IN_RING(page) \
	((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
/* Slot for free-running index @idx (wraps modulo the ring length). */
#define XENFB_IN_RING_REF(page, idx) \
	(XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])

/* Out ring: 2048 bytes immediately following the in ring. */
#define XENFB_OUT_RING_SIZE 2048
#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
#define XENFB_OUT_RING(page) \
	((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
#define XENFB_OUT_RING_REF(page, idx) \
	(XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
| 
 | ||||
/*
 * Header of the shared framebuffer page: ring indices, display geometry,
 * and the page directory mapping the framebuffer itself.
 */
struct xenfb_page {
	uint32_t in_cons, in_prod;
	uint32_t out_cons, out_prod;

	int32_t width;          /* width of the framebuffer (in pixels) */
	int32_t height;         /* height of the framebuffer (in pixels) */
	uint32_t line_length;   /* length of a row of pixels (in bytes) */
	uint32_t mem_length;    /* length of the framebuffer (in bytes) */
	uint8_t depth;          /* depth of a pixel (in bits) */

	/*
	 * Framebuffer page directory
	 *
	 * Each directory page holds PAGE_SIZE / sizeof(*pd)
	 * framebuffer pages, and can thus map up to PAGE_SIZE *
	 * PAGE_SIZE / sizeof(*pd) bytes.  With PAGE_SIZE == 4096 and
	 * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2
	 * Megs 64 bit.  256 directories give enough room for a 512
	 * Meg framebuffer with a max resolution of 12,800x10,240.
	 * Should be enough for a while with room leftover for
	 * expansion.
	 */
	unsigned long pd[256];
};
| 
 | ||||
/*
 * Wart: xenkbd needs to know default resolution.  Put it here until a
 * better solution is found, but don't leak it to the backend.
 */
#ifdef __KERNEL__
#define XENFB_WIDTH 800
#define XENFB_HEIGHT 600
#define XENFB_DEPTH 32
#endif

#endif /* __XEN_PUBLIC_IO_FBIF_H__ */
							
								
								
									
										116
									
								
								include/xen/interface/io/kbdif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										116
									
								
								include/xen/interface/io/kbdif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,116 @@ | |||
/*
 * kbdif.h -- Xen virtual keyboard/mouse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 */

#ifndef __XEN_PUBLIC_IO_KBDIF_H__
#define __XEN_PUBLIC_IO_KBDIF_H__

/* In events (backend -> frontend) */

/*
 * Frontends should ignore unknown in events.
 */

/* Pointer movement event */
#define XENKBD_TYPE_MOTION  1
/* Event type 2 currently not used */
/* Key event (includes pointer buttons) */
#define XENKBD_TYPE_KEY     3
/*
 * Pointer position event
 * Capable backend sets feature-abs-pointer in xenstore.
 * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
 * request-abs-update in xenstore.
 * NOTE(review): the in-tree frontend appears to write the key
 * "request-abs-pointer"; verify the key name above.
 */
#define XENKBD_TYPE_POS     4
| 
 | ||||
/* XENKBD_TYPE_MOTION event body: relative pointer motion. */
struct xenkbd_motion {
	uint8_t type;		/* XENKBD_TYPE_MOTION */
	int32_t rel_x;		/* relative X motion */
	int32_t rel_y;		/* relative Y motion */
	int32_t rel_z;		/* relative Z motion (wheel) */
};
| 
 | ||||
/* XENKBD_TYPE_KEY event body: key press/release (incl. pointer buttons). */
struct xenkbd_key {
	uint8_t type;		/* XENKBD_TYPE_KEY */
	uint8_t pressed;	/* 1 if pressed; 0 otherwise */
	uint32_t keycode;	/* KEY_* from linux/input.h */
};
| 
 | ||||
/* XENKBD_TYPE_POS event body: absolute pointer position. */
struct xenkbd_position {
	uint8_t type;		/* XENKBD_TYPE_POS */
	int32_t abs_x;		/* absolute X position (in FB pixels) */
	int32_t abs_y;		/* absolute Y position (in FB pixels) */
	int32_t rel_z;		/* relative Z motion (wheel) */
};
| 
 | ||||
/* Fixed on-ring size (bytes) of every in event, regardless of type. */
#define XENKBD_IN_EVENT_SIZE 40

/* Any in event; 'type' discriminates, pad[] fixes the ring slot size. */
union xenkbd_in_event {
	uint8_t type;
	struct xenkbd_motion motion;
	struct xenkbd_key key;
	struct xenkbd_position pos;
	char pad[XENKBD_IN_EVENT_SIZE];
};
| 
 | ||||
/* Out events (frontend -> backend) */

/*
 * Out events may be sent only when requested by backend, and receipt
 * of an unknown out event is an error.
 * No out events currently defined.
 */

/* Fixed on-ring size (bytes) of every out event. */
#define XENKBD_OUT_EVENT_SIZE 40

/* Any out event; only 'type' is meaningful today. */
union xenkbd_out_event {
	uint8_t type;
	char pad[XENKBD_OUT_EVENT_SIZE];
};
| 
 | ||||
/* shared page */

/* In ring: 2048 bytes starting at byte offset 1024 of the shared page. */
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
#define XENKBD_IN_RING_OFFS 1024
#define XENKBD_IN_RING(page) \
	((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
/* Slot for free-running index @idx (wraps modulo the ring length). */
#define XENKBD_IN_RING_REF(page, idx) \
	(XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])

/* Out ring: 1024 bytes immediately following the in ring. */
#define XENKBD_OUT_RING_SIZE 1024
#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
#define XENKBD_OUT_RING(page) \
	((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
#define XENKBD_OUT_RING_REF(page, idx) \
	(XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
| 
 | ||||
/* Header of the shared page: producer/consumer indices for both rings. */
struct xenkbd_page {
	uint32_t in_cons, in_prod;
	uint32_t out_cons, out_prod;
};

#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
							
								
								
									
										262
									
								
								include/xen/interface/io/netif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										262
									
								
								include/xen/interface/io/netif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,262 @@ | |||
/******************************************************************************
 * netif.h
 *
 * Unified network-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_NETIF_H__
#define __XEN_PUBLIC_IO_NETIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Older implementation of Xen network frontend / backend has an
 * implicit dependency on the MAX_SKB_FRAGS as the maximum number of
 * ring slots a skb can use. Netfront / netback may not work as
 * expected when frontend and backend have different MAX_SKB_FRAGS.
 *
 * A better approach is to add mechanism for netfront / netback to
 * negotiate this value. However we cannot fix all possible
 * frontends, so we need to define a value which states the minimum
 * slots backend must support.
 *
 * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS
 * (18), which is proved to work with most frontends. Any new backend
 * which doesn't negotiate with frontend should expect frontend to
 * send a valid packet using slots up to this value.
 */
#define XEN_NETIF_NR_SLOTS_MIN 18
| 
 | ||||
| /*
 | ||||
|  * Notifications after enqueuing any type of message should be conditional on | ||||
|  * the appropriate req_event or rsp_event field in the shared ring. | ||||
|  * If the client sends notification for rx requests then it should specify | ||||
|  * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume | ||||
|  * that it cannot safely queue packets (as it may not be kicked to send them). | ||||
|  */ | ||||
| 
 | ||||
|  /*
 | ||||
|  * "feature-split-event-channels" is introduced to separate guest TX | ||||
|  * and RX notification. Backend either doesn't support this feature or | ||||
|  * advertise it via xenstore as 0 (disabled) or 1 (enabled). | ||||
|  * | ||||
|  * To make use of this feature, frontend should allocate two event | ||||
|  * channels for TX and RX, advertise them to backend as | ||||
|  * "event-channel-tx" and "event-channel-rx" respectively. If frontend | ||||
|  * doesn't want to use this feature, it just writes "event-channel" | ||||
|  * node as before. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Multiple transmit and receive queues: | ||||
|  * If supported, the backend will write the key "multi-queue-max-queues" to | ||||
|  * the directory for that vif, and set its value to the maximum supported | ||||
|  * number of queues. | ||||
|  * Frontends that are aware of this feature and wish to use it can write the | ||||
|  * key "multi-queue-num-queues", set to the number they wish to use, which | ||||
|  * must be greater than zero, and no more than the value reported by the backend | ||||
|  * in "multi-queue-max-queues". | ||||
|  * | ||||
|  * Queues replicate the shared rings and event channels. | ||||
|  * "feature-split-event-channels" may optionally be used when using | ||||
|  * multiple queues, but is not mandatory. | ||||
|  * | ||||
|  * Each queue consists of one shared ring pair, i.e. there must be the same | ||||
|  * number of tx and rx rings. | ||||
|  * | ||||
|  * For frontends requesting just one queue, the usual event-channel and | ||||
|  * ring-ref keys are written as before, simplifying the backend processing | ||||
|  * to avoid distinguishing between a frontend that doesn't understand the | ||||
|  * multi-queue feature, and one that does, but requested only one queue. | ||||
|  * | ||||
|  * Frontends requesting two or more queues must not write the toplevel | ||||
|  * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys, | ||||
|  * instead writing those keys under sub-keys having the name "queue-N" where | ||||
|  * N is the integer ID of the queue for which those keys belong. Queues | ||||
|  * are indexed from zero. For example, a frontend with two queues and split | ||||
|  * event channels must write the following set of queue-related keys: | ||||
|  * | ||||
|  * /local/domain/1/device/vif/0/multi-queue-num-queues = "2" | ||||
|  * /local/domain/1/device/vif/0/queue-0 = "" | ||||
|  * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>" | ||||
|  * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>" | ||||
|  * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>" | ||||
|  * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>" | ||||
|  * /local/domain/1/device/vif/0/queue-1 = "" | ||||
|  * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>" | ||||
|  * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>" | ||||
|  * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>" | ||||
|  * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>" | ||||
|  * | ||||
|  * If there is any inconsistency in the XenStore data, the backend may | ||||
|  * choose not to connect any queues, instead treating the request as an | ||||
|  * error. This includes scenarios where more (or fewer) queues were | ||||
|  * requested than the frontend provided details for. | ||||
|  * | ||||
|  * Mapping of packets to queues is considered to be a function of the | ||||
|  * transmitting system (backend or frontend) and is not negotiated | ||||
|  * between the two. Guests are free to transmit packets on any queue | ||||
|  * they choose, provided it has been set up correctly. Guests must be | ||||
|  * prepared to receive packets on any queue they have requested be set up. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum | ||||
|  * offload off or on. If it is missing then the feature is assumed to be on. | ||||
|  * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum | ||||
|  * offload on or off. If it is missing then the feature is assumed to be off. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to | ||||
|  * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither | ||||
|  * frontends nor backends are assumed to be capable unless the flags are | ||||
|  * present. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * This is the 'wire' format for packets: | ||||
|  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags) | ||||
|  * [Request 2: xen_netif_extra_info]    (only if request 1 has XEN_NETTXF_extra_info) | ||||
|  * [Request 3: xen_netif_extra_info]    (only if request 2 has XEN_NETIF_EXTRA_MORE) | ||||
|  *  Request 4: xen_netif_tx_request  -- XEN_NETTXF_more_data | ||||
|  *  Request 5: xen_netif_tx_request  -- XEN_NETTXF_more_data | ||||
|  *  ... | ||||
|  *  Request N: xen_netif_tx_request  -- 0 | ||||
|  */ | ||||
| 
 | ||||
/*
 * xen_netif_tx_request.flags bits.  _XEN_NETTXF_* is the bit number,
 * XEN_NETTXF_* the corresponding mask.
 */

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _XEN_NETTXF_csum_blank		(0)
#define  XEN_NETTXF_csum_blank		(1U<<_XEN_NETTXF_csum_blank)

/* Packet data has been validated against protocol checksum. */
#define _XEN_NETTXF_data_validated	(1)
#define  XEN_NETTXF_data_validated	(1U<<_XEN_NETTXF_data_validated)

/* Packet continues in the next request descriptor. */
#define _XEN_NETTXF_more_data		(2)
#define  XEN_NETTXF_more_data		(1U<<_XEN_NETTXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _XEN_NETTXF_extra_info		(3)
#define  XEN_NETTXF_extra_info		(1U<<_XEN_NETTXF_extra_info)
| 
 | ||||
/*
 * Transmit request descriptor, placed on the shared tx ring by the
 * frontend.  This is wire format shared between frontend and backend:
 * field sizes and order must not change.
 */
#define XEN_NETIF_MAX_TX_SIZE 0xFFFF	/* largest packet size, in bytes (fits the uint16_t 'size' field) */
struct xen_netif_tx_request {
    grant_ref_t gref;      /* Reference to buffer page */
    uint16_t offset;       /* Offset within buffer page */
    uint16_t flags;        /* XEN_NETTXF_* */
    uint16_t id;           /* Echoed in response message. */
    uint16_t size;         /* Packet size in bytes.       */
};
| 
 | ||||
/* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE	(0)  /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO	(1)  /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MAX	(2)  /* first unused type value */

/* xen_netif_extra_info flags (bit number / mask pair). */
#define _XEN_NETIF_EXTRA_FLAG_MORE	(0)
#define  XEN_NETIF_EXTRA_FLAG_MORE	(1U<<_XEN_NETIF_EXTRA_FLAG_MORE)

/* GSO types, for xen_netif_extra_info.u.gso.type */
#define XEN_NETIF_GSO_TYPE_NONE		(0)
#define XEN_NETIF_GSO_TYPE_TCPV4	(1)
#define XEN_NETIF_GSO_TYPE_TCPV6	(2)
| 
 | ||||
/*
 * Extra-info descriptor.  It travels in a regular ring slot, so this
 * structure needs to fit within both netif_tx_request and
 * netif_rx_response for compatibility.
 */
struct xen_netif_extra_info {
	uint8_t type;  /* XEN_NETIF_EXTRA_TYPE_* */
	uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */

	union {
		struct {
			/*
			 * Maximum payload size of each segment. For
			 * example, for TCP this is just the path MSS.
			 */
			uint16_t size;

			/*
			 * GSO type. This determines the protocol of
			 * the packet and any extra features required
			 * to segment the packet properly.
			 */
			uint8_t type; /* XEN_NETIF_GSO_TYPE_* */

			/* Future expansion. */
			uint8_t pad;

			/*
			 * GSO features. This specifies any extra GSO
			 * features required to process this packet,
			 * such as ECN support for TCPv4.
			 */
			uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
		} gso;

		uint16_t pad[3]; /* sizes the union; keeps overall layout fixed */
	} u;
};
| 
 | ||||
/* Transmit completion, written by the backend; 'id' matches the request. */
struct xen_netif_tx_response {
	uint16_t id;           /* id copied from the corresponding tx request */
	int16_t  status;       /* XEN_NETIF_RSP_* */
};
| 
 | ||||
/* Receive buffer post: the frontend grants a page for incoming packet data. */
struct xen_netif_rx_request {
	uint16_t    id;        /* Echoed in response message.        */
	grant_ref_t gref;      /* Reference to incoming granted frame */
};
| 
 | ||||
/*
 * xen_netif_rx_response.flags bits.  _XEN_NETRXF_* is the bit number,
 * XEN_NETRXF_* the corresponding mask.
 */

/* Packet data has been validated against protocol checksum. */
#define _XEN_NETRXF_data_validated	(0)
#define  XEN_NETRXF_data_validated	(1U<<_XEN_NETRXF_data_validated)

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _XEN_NETRXF_csum_blank		(1)
#define  XEN_NETRXF_csum_blank		(1U<<_XEN_NETRXF_csum_blank)

/* Packet continues in the next request descriptor. */
#define _XEN_NETRXF_more_data		(2)
#define  XEN_NETRXF_more_data		(1U<<_XEN_NETRXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _XEN_NETRXF_extra_info		(3)
#define  XEN_NETRXF_extra_info		(1U<<_XEN_NETRXF_extra_info)

/* GSO Prefix descriptor. */
#define _XEN_NETRXF_gso_prefix		(4)
#define  XEN_NETRXF_gso_prefix		(1U<<_XEN_NETRXF_gso_prefix)
| 
 | ||||
/* Receive completion, written by the backend; 'id' matches the posted request. */
struct xen_netif_rx_response {
    uint16_t id;
    uint16_t offset;       /* Offset in page of start of received packet  */
    uint16_t flags;        /* XEN_NETRXF_* */
    int16_t  status;       /* -ve: XEN_NETIF_RSP_* ; +ve: Rx'ed pkt size. */
};
| 
 | ||||
| /*
 | ||||
|  * Generate netif ring structures and types. | ||||
|  */ | ||||
| 
 | ||||
/* Instantiate shared-ring types for the tx and rx rings (see ring.h). */
DEFINE_RING_TYPES(xen_netif_tx,
		  struct xen_netif_tx_request,
		  struct xen_netif_tx_response);
DEFINE_RING_TYPES(xen_netif_rx,
		  struct xen_netif_rx_request,
		  struct xen_netif_rx_response);

/* Values for the 'status' field of tx/rx responses. */
#define XEN_NETIF_RSP_DROPPED	-2
#define XEN_NETIF_RSP_ERROR	-1
#define XEN_NETIF_RSP_OKAY	 0
/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */
#define XEN_NETIF_RSP_NULL	 1
| 
 | ||||
| #endif | ||||
							
								
								
									
										112
									
								
								include/xen/interface/io/pciif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										112
									
								
								include/xen/interface/io/pciif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,112 @@ | |||
| /*
 | ||||
|  * PCI Backend/Frontend Common Data Structures & Macros | ||||
|  * | ||||
|  * Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
|  * of this software and associated documentation files (the "Software"), to | ||||
|  * deal in the Software without restriction, including without limitation the | ||||
|  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||||
|  * sell copies of the Software, and to permit persons to whom the Software is | ||||
|  * furnished to do so, subject to the following conditions: | ||||
|  * | ||||
|  * The above copyright notice and this permission notice shall be included in | ||||
|  * all copies or substantial portions of the Software. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
|  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
|  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
|  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||||
|  * DEALINGS IN THE SOFTWARE. | ||||
|  * | ||||
|  *   Author: Ryan Wilson <hap9@epoch.ncsc.mil> | ||||
|  */ | ||||
| #ifndef __XEN_PCI_COMMON_H__ | ||||
| #define __XEN_PCI_COMMON_H__ | ||||
| 
 | ||||
/* Be sure to bump this number if you change this file */
#define XEN_PCI_MAGIC "7"

/* xen_pci_sharedinfo flags: _XEN_PCI*_ is the bit number, XEN_PCI*_ the mask. */
#define	_XEN_PCIF_active		(0)
#define	XEN_PCIF_active			(1<<_XEN_PCIF_active)
#define	_XEN_PCIB_AERHANDLER		(1)
#define	XEN_PCIB_AERHANDLER		(1<<_XEN_PCIB_AERHANDLER)
#define	_XEN_PCIB_active		(2)
#define	XEN_PCIB_active			(1<<_XEN_PCIB_active)

/* xen_pci_op commands (xen_pci_op.cmd) */
#define	XEN_PCI_OP_conf_read		(0)
#define	XEN_PCI_OP_conf_write		(1)
#define	XEN_PCI_OP_enable_msi		(2)
#define	XEN_PCI_OP_disable_msi		(3)
#define	XEN_PCI_OP_enable_msix		(4)
#define	XEN_PCI_OP_disable_msix		(5)
#define	XEN_PCI_OP_aer_detected		(6)
#define	XEN_PCI_OP_aer_resume		(7)
#define	XEN_PCI_OP_aer_mmio		(8)
#define	XEN_PCI_OP_aer_slotreset	(9)

/* xen_pci_op error numbers (xen_pci_op.err) */
#define	XEN_PCI_ERR_success		(0)
#define	XEN_PCI_ERR_dev_not_found	(-1)
#define	XEN_PCI_ERR_invalid_offset	(-2)
#define	XEN_PCI_ERR_access_denied	(-3)
#define	XEN_PCI_ERR_not_implemented	(-4)
/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
#define XEN_PCI_ERR_op_failed		(-5)
| 
 | ||||
| /*
 | ||||
|  * it should be (PAGE_SIZE - sizeof(struct xen_pci_op)) / sizeof(struct xen_msix_entry) | ||||
|  * Should not exceed 128 | ||||
|  */ | ||||
| #define SH_INFO_MAX_VEC			128 | ||||
| 
 | ||||
/*
 * One MSI-X table entry exchanged in xen_pci_op.msix_entries.
 * NOTE(review): 'entry' appears to be the MSI-X table index and 'vector'
 * the assigned vector — confirm against the pcifront/pciback drivers.
 */
struct xen_msix_entry {
	uint16_t vector;
	uint16_t entry;
};
/*
 * A single PCI operation slot, embedded in xen_pci_sharedinfo.
 * The frontend fills the IN fields; the backend writes the OUT fields
 * back into the same slot.
 * NOTE(review): the handshake presumably uses the XEN_PCIF_active flag
 * in xen_pci_sharedinfo.flags — confirm against the drivers.
 */
struct xen_pci_op {
	/* IN: what action to perform: XEN_PCI_OP_* */
	uint32_t cmd;

	/* OUT: will contain an error number (if any) from errno.h */
	int32_t err;

	/* IN: which device to touch */
	uint32_t domain; /* PCI Domain/Segment */
	uint32_t bus;
	uint32_t devfn;

	/* IN: which configuration registers to touch */
	int32_t offset;
	int32_t size;

	/* IN/OUT: Contains the result after a READ or the value to WRITE */
	uint32_t value;
	/* IN: Contains extra info for this operation */
	uint32_t info;
	/* IN: param for msi-x */
	struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
};
| 
 | ||||
/* Used for PCIe AER (Advanced Error Reporting) handling. */
struct xen_pcie_aer_op {
	/* IN: what action to perform: XEN_PCI_OP_* (aer_* commands) */
	uint32_t cmd;
	/* IN/OUT: return aer_op result or carry error_detected state as input */
	int32_t err;

	/* IN: which device to touch */
	uint32_t domain; /* PCI Domain/Segment */
	uint32_t bus;
	uint32_t devfn;
};
/* Layout of the single shared page used by pcifront/pciback. */
struct xen_pci_sharedinfo {
	/* flags - XEN_PCIF_* / XEN_PCIB_* */
	uint32_t flags;
	struct xen_pci_op op;
	struct xen_pcie_aer_op aer_op;
};
| 
 | ||||
| #endif /* __XEN_PCI_COMMON_H__ */ | ||||
							
								
								
									
										21
									
								
								include/xen/interface/io/protocols.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								include/xen/interface/io/protocols.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,21 @@ | |||
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__

/* I/O protocol ABI name strings, one per supported architecture. */
#define XEN_IO_PROTO_ABI_X86_32     "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64     "x86_64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64  "powerpc64-abi"
#define XEN_IO_PROTO_ABI_ARM        "arm-abi"

/* Select the ABI string matching the architecture being compiled for. */
#if defined(__i386__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
#elif defined(__arm__) || defined(__aarch64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
# error arch fixup needed here
#endif

#endif
							
								
								
									
										272
									
								
								include/xen/interface/io/ring.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										272
									
								
								include/xen/interface/io/ring.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,272 @@ | |||
| /******************************************************************************
 | ||||
|  * ring.h | ||||
|  * | ||||
|  * Shared producer-consumer ring macros. | ||||
|  * | ||||
|  * Tim Deegan and Andrew Warfield November 2004. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __XEN_PUBLIC_IO_RING_H__ | ||||
| #define __XEN_PUBLIC_IO_RING_H__ | ||||
| 
 | ||||
/*
 * Ring indices are free-running 32-bit counters; they are reduced
 * modulo the (power-of-two) ring size only when used to index the
 * ring array (see RING_GET_REQUEST/RING_GET_RESPONSE).
 */
typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2		       : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
| 
 | ||||
/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)				\
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
		sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * The same calculation, but taking a pointer to an actual sring
 * instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)						\
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
| 
 | ||||
| /*
 | ||||
|  * Macros to make the correct C datatypes for a new kind of ring. | ||||
|  * | ||||
|  * To make a new ring datatype, you need to have two message structures, | ||||
|  * let's say struct request, and struct response already defined. | ||||
|  * | ||||
|  * In a header where you want the ring datatype declared, you then do: | ||||
|  * | ||||
|  *     DEFINE_RING_TYPES(mytag, struct request, struct response); | ||||
|  * | ||||
|  * These expand out to give you a set of types, as you can see below. | ||||
|  * The most important of these are: | ||||
|  * | ||||
|  *     struct mytag_sring      - The shared ring. | ||||
|  *     struct mytag_front_ring - The 'front' half of the ring. | ||||
|  *     struct mytag_back_ring  - The 'back' half of the ring. | ||||
|  * | ||||
|  * To initialize a ring in your code you need to know the location and size | ||||
|  * of the shared memory area (PAGE_SIZE, for instance). To initialise | ||||
|  * the front half: | ||||
|  * | ||||
|  *     struct mytag_front_ring front_ring; | ||||
|  *     SHARED_RING_INIT((struct mytag_sring *)shared_page); | ||||
|  *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, | ||||
|  *		       PAGE_SIZE); | ||||
|  * | ||||
|  * Initializing the back follows similarly (note that only the front | ||||
|  * initializes the shared ring): | ||||
|  * | ||||
|  *     struct mytag_back_ring back_ring; | ||||
|  *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, | ||||
|  *		      PAGE_SIZE); | ||||
|  */ | ||||
| 
 | ||||
/*
 * Expands to the three ring structures for tag __name:
 *   struct __name_sring      - the page shared between front and back;
 *   struct __name_front_ring - front end's private producer/consumer state;
 *   struct __name_back_ring  - back end's private producer/consumer state.
 * The sring layout is ABI between the two ends: do not reorder fields.
 */
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
									\
/* Shared ring entry */							\
union __name##_sring_entry {						\
    __req_t req;							\
    __rsp_t rsp;							\
};									\
									\
/* Shared ring page */							\
struct __name##_sring {							\
    RING_IDX req_prod, req_event;					\
    RING_IDX rsp_prod, rsp_event;					\
    uint8_t  pad[48];							\
    union __name##_sring_entry ring[1]; /* variable-length */		\
};									\
									\
/* "Front" end's private variables */					\
struct __name##_front_ring {						\
    RING_IDX req_prod_pvt;						\
    RING_IDX rsp_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};									\
									\
/* "Back" end's private variables */					\
struct __name##_back_ring {						\
    RING_IDX rsp_prod_pvt;						\
    RING_IDX req_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};
| 
 | ||||
| /*
 | ||||
|  * Macros for manipulating rings. | ||||
|  * | ||||
|  * FRONT_RING_whatever works on the "front end" of a ring: here | ||||
|  * requests are pushed on to the ring and responses taken off it. | ||||
|  * | ||||
|  * BACK_RING_whatever works on the "back end" of a ring: here | ||||
|  * requests are taken off the ring and responses put on. | ||||
|  * | ||||
|  * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. | ||||
|  * This is OK in 1-for-1 request-response situations where the | ||||
|  * requestor (front end) never has more than RING_SIZE()-1 | ||||
|  * outstanding requests. | ||||
|  */ | ||||
| 
 | ||||
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {					\
    (_s)->req_prod  = (_s)->rsp_prod  = 0;				\
    (_s)->req_event = (_s)->rsp_event = 1;				\
    memset((_s)->pad, 0, sizeof((_s)->pad));				\
} while(0)

#define FRONT_RING_INIT(_r, _s, __size) do {				\
    (_r)->req_prod_pvt = 0;						\
    (_r)->rsp_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {				\
    (_r)->rsp_prod_pvt = 0;						\
    (_r)->req_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

/*
 * Initialize to existing shared indexes -- for recovery.  Unlike the
 * *_INIT macros these re-sync the private producer/consumer indices
 * from the live shared ring instead of zeroing them.
 */
#define FRONT_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->req_prod_pvt = (_s)->req_prod;				\
    (_r)->rsp_cons = (_s)->rsp_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;				\
    (_r)->req_cons = (_s)->req_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)
| 
 | ||||
/* How big is this ring? */
#define RING_SIZE(_r)							\
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)						\
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front. )
 */
#define RING_FULL(_r)							\
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Unconsumed requests, clamped to the number of slots the backend can
 * still produce responses into ('rsp'); this guards against a frontend
 * publishing a req_prod further ahead than the ring allows.
 */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
    ({									\
	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
	unsigned int rsp = RING_SIZE(_r) -				\
			   ((_r)->req_cons - (_r)->rsp_prod_pvt);	\
	req < rsp ? req : rsp;						\
    })

/* Direct access to individual ring elements, by index (masked to ring size). */
#define RING_GET_REQUEST(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)               \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
| 
 | ||||
| 
 | ||||
/* Publish privately-queued requests/responses to the shared ring.
 * The wmb() orders the entry writes before the producer-index update. */
#define RING_PUSH_REQUESTS(_r) do {					\
    wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
} while (0)

#define RING_PUSH_RESPONSES(_r) do {					\
    wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;				\
} while (0)
| 
 | ||||
| /*
 | ||||
|  * Notification hold-off (req_event and rsp_event): | ||||
|  * | ||||
|  * When queueing requests or responses on a shared ring, it may not always be | ||||
|  * necessary to notify the remote end. For example, if requests are in flight | ||||
|  * in a backend, the front may be able to queue further requests without | ||||
|  * notifying the back (if the back checks for new requests when it queues | ||||
|  * responses). | ||||
|  * | ||||
|  * When enqueuing requests or responses: | ||||
|  * | ||||
|  *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument | ||||
|  *  is a boolean return value. True indicates that the receiver requires an | ||||
|  *  asynchronous notification. | ||||
|  * | ||||
|  * After dequeuing requests or responses (before sleeping the connection): | ||||
|  * | ||||
|  *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). | ||||
|  *  The second argument is a boolean return value. True indicates that there | ||||
|  *  are pending messages on the ring (i.e., the connection should not be put | ||||
|  *  to sleep). | ||||
|  * | ||||
|  *  These macros will set the req_event/rsp_event field to trigger a | ||||
|  *  notification on the very next message that is enqueued. If you want to | ||||
|  *  create batches of work (i.e., only receive a notification after several | ||||
|  *  messages have been enqueued) then you will need to create a customised | ||||
|  *  version of the FINAL_CHECK macro in your own code, which sets the event | ||||
|  *  field appropriately. | ||||
|  */ | ||||
| 
 | ||||
/*
 * Push queued requests and compute whether the far end needs a
 * notification: notify only if req_event lies in the half-open range
 * (__old, __new] of newly published entries (unsigned wrap-safe
 * comparison).  mb() orders the index publish before reading req_event.
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->req_prod;				\
    RING_IDX __new = (_r)->req_prod_pvt;				\
    wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = __new;					\
    mb(); /* back sees new requests /before/ we check req_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

/* Response-side counterpart of the macro above, keyed on rsp_event. */
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->rsp_prod;				\
    RING_IDX __new = (_r)->rsp_prod_pvt;				\
    wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = __new;					\
    mb(); /* front sees new responses /before/ we check rsp_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

/*
 * Final check before sleeping: arm req_event/rsp_event so the next
 * enqueue triggers a notification, then (after mb()) re-check for work
 * that raced in between the first check and the event-field update.
 */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->req_event = (_r)->req_cons + 1;			\
    mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
    mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
} while (0)
| 
 | ||||
| #endif /* __XEN_PUBLIC_IO_RING_H__ */ | ||||
							
								
								
									
										52
									
								
								include/xen/interface/io/tpmif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										52
									
								
								include/xen/interface/io/tpmif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,52 @@ | |||
| /******************************************************************************
 | ||||
|  * tpmif.h | ||||
|  * | ||||
|  * TPM I/O interface for Xen guest OSes, v2 | ||||
|  * | ||||
|  * This file is in the public domain. | ||||
|  * | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __XEN_PUBLIC_IO_TPMIF_H__ | ||||
| #define __XEN_PUBLIC_IO_TPMIF_H__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Xenbus state machine | ||||
|  * | ||||
|  * Device open: | ||||
|  *   1. Both ends start in XenbusStateInitialising | ||||
|  *   2. Backend transitions to InitWait (frontend does not wait on this step) | ||||
|  *   3. Frontend populates ring-ref, event-channel, feature-protocol-v2 | ||||
|  *   4. Frontend transitions to Initialised | ||||
|  *   5. Backend maps grant and event channel, verifies feature-protocol-v2 | ||||
|  *   6. Backend transitions to Connected | ||||
|  *   7. Frontend verifies feature-protocol-v2, transitions to Connected | ||||
|  * | ||||
|  * Device close: | ||||
|  *   1. State is changed to XenbusStateClosing | ||||
|  *   2. Frontend transitions to Closed | ||||
|  *   3. Backend unmaps grant and event, changes state to InitWait | ||||
|  */ | ||||
| 
 | ||||
/* State machine for the single in-flight vTPM command in the shared page. */
enum vtpm_shared_page_state {
	VTPM_STATE_IDLE,         /* no contents / vTPM idle / cancel complete */
	VTPM_STATE_SUBMIT,       /* request ready / vTPM working */
	VTPM_STATE_FINISH,       /* response ready / vTPM idle */
	VTPM_STATE_CANCEL,       /* cancel requested / vTPM working */
};
/* The backend should only change state to IDLE or FINISH, while the
 * frontend should only change to SUBMIT or CANCEL. */


/* Layout of the page shared between vTPM frontend and backend. */
struct vtpm_shared_page {
	uint32_t length;         /* request/response length in bytes */

	uint8_t state;           /* enum vtpm_shared_page_state */
	uint8_t locality;        /* for the current request */
	uint8_t pad;

	uint8_t nr_extra_pages;  /* extra pages for long packets; may be zero */
	uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
				 /* NOTE(review): [0]-length trailing array is
				  * a GNU extension; C99 would use a flexible
				  * array member.  Kept as-is for parity with
				  * the upstream Xen header. */
};
| 
 | ||||
| #endif | ||||
							
								
								
									
										229
									
								
								include/xen/interface/io/vscsiif.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										229
									
								
								include/xen/interface/io/vscsiif.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,229 @@ | |||
| /******************************************************************************
 | ||||
|  * vscsiif.h | ||||
|  * | ||||
|  * Based on the blkif.h code. | ||||
|  * | ||||
|  * Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
|  * of this software and associated documentation files (the "Software"), to | ||||
|  * deal in the Software without restriction, including without limitation the | ||||
|  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||||
|  * sell copies of the Software, and to permit persons to whom the Software is | ||||
|  * furnished to do so, subject to the following conditions: | ||||
|  * | ||||
|  * The above copyright notice and this permission notice shall be included in | ||||
|  * all copies or substantial portions of the Software. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
|  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
|  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
|  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||||
|  * DEALINGS IN THE SOFTWARE. | ||||
|  * | ||||
|  * Copyright(c) FUJITSU Limited 2008. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef __XEN__PUBLIC_IO_SCSI_H__ | ||||
| #define __XEN__PUBLIC_IO_SCSI_H__ | ||||
| 
 | ||||
| #include "ring.h" | ||||
| #include "../grant_table.h" | ||||
| 
 | ||||
| /*
 | ||||
|  * Feature and Parameter Negotiation | ||||
|  * ================================= | ||||
|  * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to | ||||
|  * communicate capabilities and to negotiate operating parameters.  This | ||||
|  * section enumerates these nodes which reside in the respective front and | ||||
|  * backend portions of the XenStore, following the XenBus convention. | ||||
|  * | ||||
|  * Any specified default value is in effect if the corresponding XenBus node | ||||
|  * is not present in the XenStore. | ||||
|  * | ||||
|  * XenStore nodes in sections marked "PRIVATE" are solely for use by the | ||||
|  * driver side whose XenBus tree contains them. | ||||
|  * | ||||
|  ***************************************************************************** | ||||
|  *                            Backend XenBus Nodes | ||||
|  ***************************************************************************** | ||||
|  * | ||||
|  *------------------ Backend Device Identification (PRIVATE) ------------------ | ||||
|  * | ||||
|  * p-devname | ||||
|  *      Values:         string | ||||
|  * | ||||
|  *      A free string used to identify the physical device (e.g. a disk name). | ||||
|  * | ||||
|  * p-dev | ||||
|  *      Values:         string | ||||
|  * | ||||
|  *      A string specifying the backend device: either a 4-tuple "h:c:t:l" | ||||
|  *      (host, controller, target, lun, all integers), or a WWN (e.g. | ||||
|  *      "naa.60014054ac780582"). | ||||
|  * | ||||
|  * v-dev | ||||
|  *      Values:         string | ||||
|  * | ||||
|  *      A string specifying the frontend device in form of a 4-tuple "h:c:t:l" | ||||
|  *      (host, controller, target, lun, all integers). | ||||
|  * | ||||
|  *--------------------------------- Features --------------------------------- | ||||
|  * | ||||
|  * feature-sg-grant | ||||
|  *      Values:         unsigned [VSCSIIF_SG_TABLESIZE...65535] | ||||
|  *      Default Value:  0 | ||||
|  * | ||||
|  *      Specifies the maximum number of scatter/gather elements in grant pages | ||||
|  *      supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE | ||||
|  *      SG elements specified directly in the request. | ||||
|  * | ||||
|  ***************************************************************************** | ||||
|  *                            Frontend XenBus Nodes | ||||
|  ***************************************************************************** | ||||
|  * | ||||
|  *----------------------- Request Transport Parameters ----------------------- | ||||
|  * | ||||
|  * event-channel | ||||
|  *      Values:         unsigned | ||||
|  * | ||||
|  *      The identifier of the Xen event channel used to signal activity | ||||
|  *      in the ring buffer. | ||||
|  * | ||||
|  * ring-ref | ||||
|  *      Values:         unsigned | ||||
|  * | ||||
|  *      The Xen grant reference granting permission for the backend to map | ||||
|  *      the sole page in a single page sized ring buffer. | ||||
|  * | ||||
|  * protocol | ||||
|  *      Values:         string (XEN_IO_PROTO_ABI_*) | ||||
|  *      Default Value:  XEN_IO_PROTO_ABI_NATIVE | ||||
|  * | ||||
|  *      The machine ABI rules governing the format of all ring request and | ||||
|  *      response structures. | ||||
|  */ | ||||
| 
 | ||||
| /* Requests from the frontend to the backend */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd. | ||||
|  * The target is specified via channel, id and lun. | ||||
|  * | ||||
|  * The operation to be performed is specified via a CDB in cmnd[], the length | ||||
|  * of the CDB is in cmd_len. sc_data_direction specifies the direction of data | ||||
|  * (to the device, from the device, or none at all). | ||||
|  * | ||||
|  * If data is to be transferred to or from the device the buffer(s) in the | ||||
|  * guest memory is/are specified via one or multiple scsiif_request_segment | ||||
|  * descriptors each specifying a memory page via a grant_ref_t, a offset into | ||||
|  * the page and the length of the area in that page. All scsiif_request_segment | ||||
|  * areas concatenated form the resulting data buffer used by the operation. | ||||
|  * If the number of scsiif_request_segment areas is not too large (less than | ||||
|  * or equal VSCSIIF_SG_TABLESIZE) the areas can be specified directly in the | ||||
|  * seg[] array and the number of valid scsiif_request_segment elements is to be | ||||
|  * set in nr_segments. | ||||
|  * | ||||
|  * If "feature-sg-grant" in the Xenstore is set it is possible to specify more | ||||
|  * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection. | ||||
|  * The maximum number of allowed scsiif_request_segment elements is the value | ||||
|  * of the "feature-sg-grant" entry from Xenstore. When using indirection the | ||||
|  * seg[] array doesn't contain specifications of the data buffers, but | ||||
|  * references to scsiif_request_segment arrays, which in turn reference the | ||||
|  * data buffers. While nr_segments holds the number of populated seg[] entries | ||||
|  * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment | ||||
|  * elements referencing the target data buffers is calculated from the lengths | ||||
|  * of the seg[] elements (the sum of all valid seg[].length divided by the | ||||
|  * size of one scsiif_request_segment structure). | ||||
|  */ | ||||
| #define VSCSIIF_ACT_SCSI_CDB		1 | ||||
| 
 | ||||
| /*
 | ||||
|  * Request abort of a running operation for the specified target given by | ||||
|  * channel, id, lun and the operation's rqid in ref_rqid. | ||||
|  */ | ||||
| #define VSCSIIF_ACT_SCSI_ABORT		2 | ||||
| 
 | ||||
| /*
 | ||||
|  * Request a device reset of the specified target (channel and id). | ||||
|  */ | ||||
| #define VSCSIIF_ACT_SCSI_RESET		3 | ||||
| 
 | ||||
| /*
 | ||||
|  * Preset scatter/gather elements for a following request. Deprecated. | ||||
|  * Keeping the define only to avoid usage of the value "4" for other actions. | ||||
|  */ | ||||
| #define VSCSIIF_ACT_SCSI_SG_PRESET	4 | ||||
| 
 | ||||
| /*
 | ||||
|  * Maximum scatter/gather segments per request. | ||||
|  * | ||||
|  * Considering balance between allocating at least 16 "vscsiif_request" | ||||
|  * structures on one page (4096 bytes) and the number of scatter/gather | ||||
|  * elements needed, we decided to use 26 as a magic number. | ||||
|  * | ||||
|  * If "feature-sg-grant" is set, more scatter/gather elements can be specified | ||||
|  * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages. | ||||
|  * In this case the vscsiif_request seg elements don't contain references to | ||||
|  * the user data, but to the SG elements referencing the user data. | ||||
|  */ | ||||
| #define VSCSIIF_SG_TABLESIZE		26 | ||||
| 
 | ||||
| /*
 | ||||
|  * based on Linux kernel 2.6.18, still valid | ||||
|  * Changing these values requires support of multiple protocols via the rings | ||||
|  * as "old clients" will blindly use these values and the resulting structure | ||||
|  * sizes. | ||||
|  */ | ||||
| #define VSCSIIF_MAX_COMMAND_SIZE	16 | ||||
| #define VSCSIIF_SENSE_BUFFERSIZE	96 | ||||
| 
 | ||||
| struct scsiif_request_segment { | ||||
| 	grant_ref_t gref; | ||||
| 	uint16_t offset; | ||||
| 	uint16_t length; | ||||
| }; | ||||
| 
 | ||||
| #define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment)) | ||||
| 
 | ||||
| /* Size of one request is 252 bytes */ | ||||
| struct vscsiif_request { | ||||
| 	uint16_t rqid;		/* private guest value, echoed in resp  */ | ||||
| 	uint8_t act;		/* command between backend and frontend */ | ||||
| 	uint8_t cmd_len;	/* valid CDB bytes */ | ||||
| 
 | ||||
| 	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];	/* the CDB */ | ||||
| 	uint16_t timeout_per_command;	/* deprecated */ | ||||
| 	uint16_t channel, id, lun;	/* (virtual) device specification */ | ||||
| 	uint16_t ref_rqid;		/* command abort reference */ | ||||
| 	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
 | ||||
| 					   DMA_FROM_DEVICE(2) | ||||
| 					   DMA_NONE(3) requests */ | ||||
| 	uint8_t nr_segments;		/* Number of pieces of scatter-gather */ | ||||
| /*
 | ||||
|  * flag in nr_segments: SG elements via grant page | ||||
|  * | ||||
|  * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number | ||||
|  * of grant pages containing SG elements. Usable if "feature-sg-grant" set. | ||||
|  */ | ||||
| #define VSCSIIF_SG_GRANT	0x80 | ||||
| 
 | ||||
| 	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; | ||||
| 	uint32_t reserved[3]; | ||||
| }; | ||||
| 
 | ||||
| /* Size of one response is 252 bytes */ | ||||
| struct vscsiif_response { | ||||
| 	uint16_t rqid;		/* identifies request */ | ||||
| 	uint8_t padding; | ||||
| 	uint8_t sense_len; | ||||
| 	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; | ||||
| 	int32_t rslt; | ||||
| 	uint32_t residual_len;	/* request bufflen -
 | ||||
| 				   return the value from physical device */ | ||||
| 	uint32_t reserved[36]; | ||||
| }; | ||||
| 
 | ||||
| DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); | ||||
| 
 | ||||
| #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ | ||||
							
								
								
									
										50
									
								
								include/xen/interface/io/xenbus.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								include/xen/interface/io/xenbus.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,50 @@ | |||
| /*****************************************************************************
 | ||||
|  * xenbus.h | ||||
|  * | ||||
|  * Xenbus protocol details. | ||||
|  * | ||||
|  * Copyright (C) 2005 XenSource Ltd. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _XEN_PUBLIC_IO_XENBUS_H | ||||
| #define _XEN_PUBLIC_IO_XENBUS_H | ||||
| 
 | ||||
| /* The state of either end of the Xenbus, i.e. the current communication
 | ||||
|    status of initialisation across the bus.  States here imply nothing about | ||||
|    the state of the connection between the driver and the kernel's device | ||||
|    layers.  */ | ||||
| enum xenbus_state | ||||
| { | ||||
| 	XenbusStateUnknown      = 0, | ||||
| 	XenbusStateInitialising = 1, | ||||
| 	XenbusStateInitWait     = 2,  /* Finished early
 | ||||
| 					 initialisation, but waiting | ||||
| 					 for information from the peer | ||||
| 					 or hotplug scripts. */ | ||||
| 	XenbusStateInitialised  = 3,  /* Initialised and waiting for a
 | ||||
| 					 connection from the peer. */ | ||||
| 	XenbusStateConnected    = 4, | ||||
| 	XenbusStateClosing      = 5,  /* The device is being closed
 | ||||
| 					 due to an error or an unplug | ||||
| 					 event. */ | ||||
| 	XenbusStateClosed       = 6, | ||||
| 
 | ||||
| 	/*
 | ||||
| 	* Reconfiguring: The device is being reconfigured. | ||||
| 	*/ | ||||
| 	XenbusStateReconfiguring = 7, | ||||
| 
 | ||||
| 	XenbusStateReconfigured  = 8 | ||||
| }; | ||||
| 
 | ||||
| #endif /* _XEN_PUBLIC_IO_XENBUS_H */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Local variables: | ||||
|  *  c-file-style: "linux" | ||||
|  *  indent-tabs-mode: t | ||||
|  *  c-indent-level: 8 | ||||
|  *  c-basic-offset: 8 | ||||
|  *  tab-width: 8 | ||||
|  * End: | ||||
|  */ | ||||
							
								
								
									
										94
									
								
								include/xen/interface/io/xs_wire.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										94
									
								
								include/xen/interface/io/xs_wire.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,94 @@ | |||
| /*
 | ||||
|  * Details of the "wire" protocol between Xen Store Daemon and client | ||||
|  * library or guest kernel. | ||||
|  * Copyright (C) 2005 Rusty Russell IBM Corporation | ||||
|  */ | ||||
| 
 | ||||
| #ifndef _XS_WIRE_H | ||||
| #define _XS_WIRE_H | ||||
| 
 | ||||
| enum xsd_sockmsg_type | ||||
| { | ||||
|     XS_DEBUG, | ||||
|     XS_DIRECTORY, | ||||
|     XS_READ, | ||||
|     XS_GET_PERMS, | ||||
|     XS_WATCH, | ||||
|     XS_UNWATCH, | ||||
|     XS_TRANSACTION_START, | ||||
|     XS_TRANSACTION_END, | ||||
|     XS_INTRODUCE, | ||||
|     XS_RELEASE, | ||||
|     XS_GET_DOMAIN_PATH, | ||||
|     XS_WRITE, | ||||
|     XS_MKDIR, | ||||
|     XS_RM, | ||||
|     XS_SET_PERMS, | ||||
|     XS_WATCH_EVENT, | ||||
|     XS_ERROR, | ||||
|     XS_IS_DOMAIN_INTRODUCED, | ||||
|     XS_RESUME, | ||||
|     XS_SET_TARGET, | ||||
|     XS_RESTRICT, | ||||
|     XS_RESET_WATCHES, | ||||
| }; | ||||
| 
 | ||||
| #define XS_WRITE_NONE "NONE" | ||||
| #define XS_WRITE_CREATE "CREATE" | ||||
| #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" | ||||
| 
 | ||||
| /* We hand errors as strings, for portability. */ | ||||
| struct xsd_errors | ||||
| { | ||||
|     int errnum; | ||||
|     const char *errstring; | ||||
| }; | ||||
| #define XSD_ERROR(x) { x, #x } | ||||
| static struct xsd_errors xsd_errors[] __attribute__((unused)) = { | ||||
|     XSD_ERROR(EINVAL), | ||||
|     XSD_ERROR(EACCES), | ||||
|     XSD_ERROR(EEXIST), | ||||
|     XSD_ERROR(EISDIR), | ||||
|     XSD_ERROR(ENOENT), | ||||
|     XSD_ERROR(ENOMEM), | ||||
|     XSD_ERROR(ENOSPC), | ||||
|     XSD_ERROR(EIO), | ||||
|     XSD_ERROR(ENOTEMPTY), | ||||
|     XSD_ERROR(ENOSYS), | ||||
|     XSD_ERROR(EROFS), | ||||
|     XSD_ERROR(EBUSY), | ||||
|     XSD_ERROR(EAGAIN), | ||||
|     XSD_ERROR(EISCONN) | ||||
| }; | ||||
| 
 | ||||
| struct xsd_sockmsg | ||||
| { | ||||
|     uint32_t type;  /* XS_??? */ | ||||
|     uint32_t req_id;/* Request identifier, echoed in daemon's response.  */ | ||||
|     uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ | ||||
|     uint32_t len;   /* Length of data following this. */ | ||||
| 
 | ||||
|     /* Generally followed by nul-terminated string(s). */ | ||||
| }; | ||||
| 
 | ||||
| enum xs_watch_type | ||||
| { | ||||
|     XS_WATCH_PATH = 0, | ||||
|     XS_WATCH_TOKEN | ||||
| }; | ||||
| 
 | ||||
| /* Inter-domain shared memory communications. */ | ||||
| #define XENSTORE_RING_SIZE 1024 | ||||
| typedef uint32_t XENSTORE_RING_IDX; | ||||
| #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) | ||||
| struct xenstore_domain_interface { | ||||
|     char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ | ||||
|     char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ | ||||
|     XENSTORE_RING_IDX req_cons, req_prod; | ||||
|     XENSTORE_RING_IDX rsp_cons, rsp_prod; | ||||
| }; | ||||
| 
 | ||||
| /* Violating this is very bad.  See docs/misc/xenstore.txt. */ | ||||
| #define XENSTORE_PAYLOAD_MAX 4096 | ||||
| 
 | ||||
| #endif /* _XS_WIRE_H */ | ||||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue
	
	 awab228
						awab228