0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027 #ifndef __XEN_NETBACK__COMMON_H__
0028 #define __XEN_NETBACK__COMMON_H__
0029
0030 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
0031
0032 #include <linux/module.h>
0033 #include <linux/interrupt.h>
0034 #include <linux/slab.h>
0035 #include <linux/ip.h>
0036 #include <linux/in.h>
0037 #include <linux/io.h>
0038 #include <linux/netdevice.h>
0039 #include <linux/etherdevice.h>
0040 #include <linux/wait.h>
0041 #include <linux/sched.h>
0042
0043 #include <xen/interface/io/netif.h>
0044 #include <xen/interface/grant_table.h>
0045 #include <xen/grant_table.h>
0046 #include <xen/xenbus.h>
0047 #include <xen/page.h>
0048 #include <linux/debugfs.h>
0049
typedef unsigned int pending_ring_idx_t;

/* Per-slot bookkeeping for a guest TX request that is still in flight. */
struct pending_tx_info {
	struct xen_netif_tx_request req; /* copy of the frontend's tx request */
	unsigned int extra_count;        /* number of extra-info slots with req */

	/* Callback data for released SKBs. The zerocopy callback
	 * (xenvif_zerocopy_callback) uses this to recover the pending_idx,
	 * which is also the index of this entry in the queue's
	 * pending_tx_info[] array.
	 * NOTE(review): initialization/chaining of callback_struct happens
	 * outside this header (presumably in xenvif_init_queue and the TX
	 * path) — confirm against netback.c / interface.c.
	 */
	struct ubuf_info callback_struct;
};
0067
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Per-slot metadata gathered while building a guest RX response. */
struct xenvif_rx_meta {
	int id;       /* frontend request id, echoed back in the response */
	int size;     /* bytes placed in this slot */
	int gso_type; /* one of the XEN_NETIF_GSO_TYPE_* values */
	int gso_size; /* GSO segment size (meaningful when gso_type is set) */
};
0077
/* Build a feature-bit mask from a XEN_NETIF_GSO_TYPE_* suffix. */
#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

#define NETBACK_INVALID_HANDLE -1

/* XEN_NETBK_LEGACY_SLOTS_MAX indicates the maximum number of slots a
 * valid packet can use; it is defined to XEN_NETIF_NR_SLOTS_MIN, which
 * every frontend is supposed to support.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended (5 extra chars). */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended (3 extra chars). */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
0105
struct xenvif;

/* Per-queue statistics. */
struct xenvif_stats {
	/* Byte/packet counters mirroring the subset of
	 * struct net_device_stats that netback updates per queue.
	 */
	u64 rx_bytes;
	u64 rx_packets;
	u64 tx_bytes;
	u64 tx_packets;

	/* Additional, netback-specific counters. */
	unsigned long rx_gso_checksum_fixup; /* GSO frames needing csum repair */
	unsigned long tx_zerocopy_sent;      /* skbs sent via zerocopy mapping */
	unsigned long tx_zerocopy_success;   /* zerocopy completions, success */
	unsigned long tx_zerocopy_fail;      /* zerocopy completions, failure */
	unsigned long tx_frag_overflow;      /* skbs exceeding the frag limit */
};
0125
#define COPY_BATCH_SIZE 64

/* Batched grant-copy state for the guest RX path: up to COPY_BATCH_SIZE
 * copy operations (and the RX ring index each belongs to) are queued
 * before being flushed as one batch.
 */
struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;               /* valid entries in op[]/idx[] */
	struct sk_buff_head *completed; /* skbs completed by the current batch */
};
0134
struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id;            /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif;         /* Parent VIF */

	/* TX/RX common EOI handling.
	 * When feature-split-event-channels = 0 the interrupt handler sets
	 * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
	 * by the RX and TX interrupt handlers respectively.
	 * The RX/TX handler threads issue an EOI when one of these bits is
	 * set and clear it afterwards.
	 */
	atomic_t eoi_pending;
#define NETBK_RX_EOI 0x01
#define NETBK_TX_EOI 0x02
#define NETBK_COMMON_EOI 0x04

	/* Guest TX is driven from NAPI context. */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* Passed to gnttab_[un]map_refs with pages under (un)mapping. */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* Prevents zerocopy callbacks racing over dealloc_ring. */
	spinlock_t callback_lock;
	/* Prevents the dealloc thread and the NAPI instance racing over
	 * response creation and pending_ring — NOTE(review): exact coverage
	 * is defined by the users in netback.c; confirm there.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Guest RX is handled by a dedicated kthread. */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;   /* max bytes queued on rx_queue */
	unsigned int rx_queue_len;   /* bytes currently queued */
	unsigned long last_rx_time;  /* jiffies of last RX activity */
	unsigned int rx_slots_needed;
	bool stalled;                /* frontend not consuming RX responses */

	struct xenvif_copy_state rx_copy;

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;
	bool rate_limited;

	/* Statistics */
	struct xenvif_stats stats;
};
0219
/* Bit positions used in xenvif.status (set/tested atomically). */
enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};
0224
/* One entry in a vif's RCU-protected multicast filter list. */
struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6]; /* Ethernet MAC address */
};

/* Maximum number of multicast filter entries per vif. */
#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40
0236
/* Cached result of one toeplitz-style hash computation, keyed by the
 * hashed bytes ("tag").
 */
struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE]; /* bytes that were hashed */
	unsigned int len;                /* valid length of tag[] */
	u32 val;                         /* cached hash value */
	int seq;                         /* sequence number at insertion */
};

/* LRU-style cache of recent hash results. */
struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count; /* entries currently in list */
	atomic_t seq;       /* monotonically increasing insertion counter */
};

/* Receive-side hashing configuration negotiated via the control ring. */
struct xenvif_hash {
	unsigned int alg; /* hash algorithm in use */
	u32 flags;        /* enabled hash types */
	bool mapping_sel; /* which of the two mapping tables is active */
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE]; /* hash -> queue */
	unsigned int size; /* valid length of the active mapping table */
	struct xenvif_hash_cache cache;
};
0262
/* Xenbus/driver-model state for one backend device. */
struct backend_info {
	struct xenbus_device *dev;
	struct xenvif *vif;

	/* This is the state that will be reflected in xenstore when any
	 * active hotplug script completes.
	 */
	enum xenbus_state state;

	enum xenbus_state frontend_state;
	struct xenbus_watch hotplug_status_watch;
	u8 have_hotplug_status_watch:1; /* watch registered? */

	const char *hotplug_script;
};
0278
/* One virtual network interface exposed to a guest. */
struct xenvif {
	/* Unique identification of the guest-facing interface. */
	domid_t domid;
	unsigned int handle;

	u8 fe_dev_addr[6];             /* frontend's MAC address */
	struct list_head fe_mcast_addr; /* multicast filter list (RCU) */
	unsigned int fe_mcast_count;    /* entries on fe_mcast_addr */

	/* Frontend feature information. */
	int gso_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* Headroom requested by xen-netfront (XDP support). */
	u16 xdp_headroom;

	/* Is this interface disabled? True when backend discovers
	 * frontend is rogue.
	 */
	bool disabled;
	unsigned long status;        /* VIF_STATUS_* bits */
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resources allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	struct backend_info *be;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};
0331
/* Per-skb private data stored in skb->cb while on the guest RX path. */
struct xenvif_rx_cb {
	unsigned long expires;  /* jiffies after which the skb is dropped */
	int meta_slots_used;    /* RX ring slots consumed by this skb */
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
0338
0339 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
0340 {
0341 return to_xenbus_device(vif->dev->dev.parent);
0342 }
0343
void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

/* (Un)Map the frontend's shared TX/RX rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing. */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

/* Guest RX kthread entry point and wakeup helper. */
int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

/* Dealloc (TX unmap) kthread entry point. */
int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released. */
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
			      bool zerocopy_success);
0396
0397 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
0398 {
0399 return MAX_PENDING_REQS -
0400 queue->pending_prod + queue->pending_cons;
0401 }
0402
irqreturn_t xenvif_interrupt(int irq, void *dev_id);

/* Module parameters (defined in netback.c). */
extern bool separate_tx_rx_irq;
extern bool provides_xdp_headroom;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

/* Track in-flight zerocopy packets per queue. */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif
0442
0443 #endif