/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

/*
 * Multiple hardware queues/rings:
 * If supported, the backend will write the key "multi-queue-max-queues" to
 * the directory for that vbd, and set its value to the maximum supported
 * number of queues.
 * Frontends that are aware of this feature and wish to use it can write the
 * key "multi-queue-num-queues" with the number they wish to use, which must
 * be greater than zero, and no more than the value reported by the backend
 * in "multi-queue-max-queues".
 *
 * For frontends requesting just one queue, the usual event-channel and
 * ring-ref keys are written as before, simplifying the backend processing
 * to avoid distinguishing between a frontend that doesn't understand the
 * multi-queue feature, and one that does, but requested only one queue.
 *
 * Frontends requesting two or more queues must not write the toplevel
 * event-channel and ring-ref keys, instead writing those keys under sub-keys
 * having the name "queue-N" where N is the integer ID of the queue/ring to
 * which those keys belong. Queues are indexed from zero.
 * For example, a frontend with two queues must write the following set of
 * queue-related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 * It is also possible to use multiple queues/rings together with the
 * multi-page ring buffer feature.
 * For example, a frontend requesting two queues/rings, each with a ring
 * buffer of two pages, must write the following set of related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/ring-page-order = "1"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
 * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 */
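/*
 * Illustrative sketch only (not part of this interface): one way a Linux
 * frontend might publish the per-queue keys described above for single-page
 * rings.  Assumes <xen/xenbus.h> and <linux/kernel.h>; the helper name and
 * the ring_ref[]/evtchn[] arrays are made up for the example, and a real
 * frontend would normally wrap these writes in a xenbus transaction.
 */
static int blkfront_write_queue_keys(struct xenbus_device *dev,
				     const grant_ref_t *ring_ref,
				     const unsigned int *evtchn,
				     unsigned int num_queues)
{
	unsigned int i;
	char node[32];
	int err;

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-num-queues", "%u", num_queues);
	if (err)
		return err;

	for (i = 0; i < num_queues; i++) {
		/* Per-queue keys live under the "queue-N" sub-directory. */
		snprintf(node, sizeof(node), "queue-%u/ring-ref", i);
		err = xenbus_printf(XBT_NIL, dev->nodename, node,
				    "%u", ring_ref[i]);
		if (err)
			return err;

		snprintf(node, sizeof(node), "queue-%u/event-channel", i);
		err = xenbus_printf(XBT_NIL, dev->nodename, node,
				    "%u", evtchn[i]);
		if (err)
			return err;
	}
	return 0;
}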
/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2

/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info. A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate. The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes. If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
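/*
 * Illustrative sketch only (not part of this interface): probing the
 * optional feature nodes above before attempting the corresponding
 * operations.  Assumes <xen/xenbus.h>; the helper name and output
 * parameters are made up for the example.
 */
static void blkfront_probe_features(struct xenbus_device *dev,
				    bool *barrier_ok, bool *flush_ok)
{
	/*
	 * A missing node defaults to 0, i.e. the backend did not advertise
	 * the feature and the frontend should not issue such requests.
	 */
	*barrier_ok = xenbus_read_unsigned(dev->otherend,
					   "feature-barrier", 0);
	*flush_ok = xenbus_read_unsigned(dev->otherend,
					 "feature-flush-cache", 0);
}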
/*
 * Recognised only if "feature-discard" is present in backend xenbus info.
 * The "feature-discard" node contains a boolean indicating whether trim
 * (ATA) or unmap (SCSI) - conveniently called discard - requests are
 * likely to succeed or fail. Either way, a discard request may fail at any
 * time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it is
 * worthwhile for the frontend to attempt discard requests.
 * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
 * create the "feature-discard" node!
 *
 * A discard operation is a request for the underlying block device to mark
 * extents to be erased. However, discard does not guarantee that the blocks
 * will be erased from the device - it is just a hint to the device
 * controller that these blocks are no longer in use. What the device
 * controller does with that information is left to the controller.
 * Discard operations are passed with sector_number as the sector index to
 * begin discard operations at and nr_sectors as the number of sectors to be
 * discarded. The specified sectors should be discarded if the underlying
 * block device supports trim (ATA) or unmap (SCSI) operations, or a
 * BLKIF_RSP_EOPNOTSUPP should be returned.
 * More information about trim/unmap operations can be found at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 *
 * The backend can optionally provide three extra XenBus attributes to
 * further optimize the discard functionality:
 * 'discard-alignment' - Devices that support discard functionality may
 * internally allocate space in units that are bigger than the exported
 * logical block size. The discard-alignment parameter indicates how many
 * bytes the beginning of the partition is offset from the internal
 * allocation unit's natural alignment.
 * 'discard-granularity' - Devices that support discard functionality may
 * internally allocate space using units that are bigger than the logical
 * block size. The discard-granularity parameter indicates the size of the
 * internal allocation unit in bytes if reported by the device. Otherwise
 * the discard-granularity will be set to match the device's physical block
 * size.
 * 'discard-secure' - All copies of the discarded sectors (potentially
 * created by garbage collection) must also be erased. To use this feature,
 * the flag BLKIF_DISCARD_SECURE must be set in the blkif_request_discard.
 */
#define BLKIF_OP_DISCARD           5

/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long
 * as it's less than the number provided by the backend. The indirect_grefs
 * field in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold
 * the information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6
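/*
 * Illustrative sketch only (not part of this interface): the ceil()
 * calculation above expressed with the kernel's DIV_ROUND_UP().  The macro
 * names are assumptions, and struct blkif_request_segment is the structure
 * defined further down in this header.
 */
#define BLKIF_SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE / sizeof(struct blkif_request_segment))
#define BLKIF_INDIRECT_PAGES(segs) \
	DIV_ROUND_UP(segs, BLKIF_SEGS_PER_INDIRECT_FRAME)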
/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct blkif_request_segment {
	grant_ref_t gref;            /* reference to I/O buffer frame        */
	/* @first_sect: first sector in frame to transfer (inclusive).       */
	/* @last_sect: last sector in frame to transfer (inclusive).         */
	uint8_t     first_sect, last_sect;
};

struct blkif_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
	blkif_vdev_t   _pad1;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;
	uint64_t       nr_sectors;
	uint8_t        _pad3;
} __attribute__((__packed__));

struct blkif_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad3;        /* offsetof(blkif_req..,u.other.id)==8  */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
#ifndef CONFIG_X86_32
	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifndef CONFIG_X86_32
	uint32_t       _pad3;        /* make it 64 byte aligned */
#else
	uint64_t       _pad3;        /* make it 64 byte aligned */
#endif
} __attribute__((__packed__));

struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_request_rw rw;
		struct blkif_request_discard discard;
		struct blkif_request_other other;
		struct blkif_request_indirect indirect;
	} u;
} __attribute__((__packed__));

struct blkif_response {
	uint64_t       id;           /* copied from request */
	uint8_t        operation;    /* copied from request */
	int16_t        status;       /* BLKIF_RSP_??? */
};

/*
 * STATUS RETURN CODES.
 */
/* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
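/*
 * Illustrative sketch only (not part of this interface): submitting a read
 * request on a single-page front ring built from the types generated by
 * DEFINE_RING_TYPES() above.  Ring setup (grant of the shared page, event
 * channel) is assumed to have happened already; the function and parameter
 * names are made up for the example.
 */
static void example_submit_read(struct blkif_front_ring *ring,
				blkif_vdev_t handle, uint64_t req_id,
				blkif_sector_t sector, grant_ref_t gref,
				int irq)
{
	struct blkif_request *req;
	int notify;

	/* Claim the next free slot in the shared ring. */
	req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	ring->req_prod_pvt++;

	req->operation          = BLKIF_OP_READ;
	req->u.rw.nr_segments   = 1;
	req->u.rw.handle        = handle;
	req->u.rw.id            = req_id;    /* echoed back in the response  */
	req->u.rw.sector_number = sector;
	req->u.rw.seg[0].gref       = gref;  /* grant for the data page      */
	req->u.rw.seg[0].first_sect = 0;
	req->u.rw.seg[0].last_sect  = 7;     /* whole 4K page, 512B sectors  */

	/* Publish the request; notify the backend only if it asked for it. */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);
}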
#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

/* Xen-defined major numbers for virtual disks, they look strangely
 * familiar */
#define XEN_IDE0_MAJOR          3
#define XEN_IDE1_MAJOR         22
#define XEN_SCSI_DISK0_MAJOR    8
#define XEN_SCSI_DISK1_MAJOR   65
#define XEN_SCSI_DISK2_MAJOR   66
#define XEN_SCSI_DISK3_MAJOR   67
#define XEN_SCSI_DISK4_MAJOR   68
#define XEN_SCSI_DISK5_MAJOR   69
#define XEN_SCSI_DISK6_MAJOR   70
#define XEN_SCSI_DISK7_MAJOR   71
#define XEN_SCSI_DISK8_MAJOR  128
#define XEN_SCSI_DISK9_MAJOR  129
#define XEN_SCSI_DISK10_MAJOR 130
#define XEN_SCSI_DISK11_MAJOR 131
#define XEN_SCSI_DISK12_MAJOR 132
#define XEN_SCSI_DISK13_MAJOR 133
#define XEN_SCSI_DISK14_MAJOR 134
#define XEN_SCSI_DISK15_MAJOR 135

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
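/*
 * Illustrative sketch only (appended for exposition, not part of the header
 * proper): the backend side of the notification hold-off described at the
 * top of this file.  After draining requests, RING_FINAL_CHECK_FOR_REQUESTS()
 * re-arms req_event so the frontend knows whether its next request needs an
 * event-channel kick.  The request processing step and the irq parameter are
 * assumed; the id field sits at the same offset in every request variant.
 */
static void example_backend_poll(struct blkif_back_ring *ring, int irq)
{
	struct blkif_response *rsp;
	struct blkif_request req;
	RING_IDX rc;
	int more_to_do, notify;

	do {
		rc = ring->req_cons;
		while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
			/* Copy the request out of the shared page before use. */
			RING_COPY_REQUEST(ring, rc, &req);
			ring->req_cons = ++rc;

			/* ... perform the I/O described by req here ... */

			rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
			rsp->id        = req.u.rw.id;
			rsp->operation = req.operation;
			rsp->status    = BLKIF_RSP_OKAY;
			ring->rsp_prod_pvt++;
		}
		/* Re-arm req_event and check whether new requests raced in. */
		RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
	} while (more_to_do);

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);
}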