/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

/* GVE Transmit Descriptor formats */

#ifndef _GVE_DESC_H_
#define _GVE_DESC_H_

#include <linux/build_bug.h>

/* A note on seg_addrs
 *
 * Base addresses encoded in seg_addr are not assumed to be physical
 * addresses. The ring format assumes these come from some linear address
 * space. This could be physical memory, kernel virtual memory, or user
 * virtual memory.
 * If raw DMA addressing is not supported then gVNIC uses lists of registered
 * pages. Each queue is assumed to be associated with a single such linear
 * address space to ensure a consistent meaning for seg_addrs posted to its
 * rings.
 */

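/* Illustrative sketch (not part of the upstream header): in the
 * registered-page (QPL) case a seg_addr is a plain byte offset into the
 * queue's linear address space, e.g. page index * PAGE_SIZE plus the
 * offset within that page. The helper name is hypothetical; PAGE_SIZE
 * arrives via the usual kernel includes.
 */
static inline u64 gve_example_qpl_seg_addr(u32 page_idx, u32 page_off)
{
	return (u64)page_idx * PAGE_SIZE + page_off;
}
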
struct gve_tx_pkt_desc {
	u8	type_flags;	/* desc type is upper 4 bits, flags lower */
	u8	l4_csum_offset;	/* relative offset of L4 csum word */
	u8	l4_hdr_offset;	/* Offset of start of L4 headers in packet */
	u8	desc_cnt;	/* Total descriptors for this packet */
	__be16	len;		/* Total length of this packet (in bytes) */
	__be16	seg_len;	/* Length of this descriptor's segment */
	__be64	seg_addr;	/* Base address (see note) of this segment */
} __packed;

struct gve_tx_mtd_desc {
	u8	type_flags;	/* type is upper 4 bits, subtype lower	*/
	u8	path_state;	/* state is lower 4 bits, hash type upper */
	__be16	reserved0;
	__be32	path_hash;
	__be64	reserved1;
} __packed;

struct gve_tx_seg_desc {
	u8	type_flags;	/* type is upper 4 bits, flags lower	*/
	u8	l3_offset;	/* TSO: 2 byte units to start of IPH	*/
	__be16	reserved;
	__be16	mss;		/* TSO MSS				*/
	__be16	seg_len;
	__be64	seg_addr;
} __packed;

/* GVE Transmit Descriptor Types */
#define	GVE_TXD_STD		(0x0 << 4) /* Std with Host Address	*/
#define	GVE_TXD_TSO		(0x1 << 4) /* TSO with Host Address	*/
#define	GVE_TXD_SEG		(0x2 << 4) /* Seg with Host Address	*/
#define	GVE_TXD_MTD		(0x3 << 4) /* Metadata			*/

/* GVE Transmit Descriptor Flags for Std Pkts */
#define	GVE_TXF_L4CSUM	BIT(0)	/* Need csum offload */
#define	GVE_TXF_TSTAMP	BIT(2)	/* Timestamp required */

/* GVE Transmit Descriptor Flags for TSO Segs */
#define	GVE_TXSF_IPV6	BIT(1)	/* IPv6 TSO */
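
/* Illustrative sketch (not part of the upstream header): the flags above
 * OR into the low bits of type_flags beneath the descriptor type nibble.
 * The helper names are hypothetical.
 */
static inline u8 gve_example_pkt_type_flags(bool is_tso, bool csum_offload)
{
	u8 type_flags = is_tso ? GVE_TXD_TSO : GVE_TXD_STD;

	if (csum_offload)
		type_flags |= GVE_TXF_L4CSUM;
	return type_flags;
}

static inline u8 gve_example_seg_type_flags(bool is_ipv6)
{
	return is_ipv6 ? (GVE_TXD_SEG | GVE_TXSF_IPV6) : GVE_TXD_SEG;
}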

/* GVE Transmit Descriptor Options for MTD Segs */
#define GVE_MTD_SUBTYPE_PATH		0

#define GVE_MTD_PATH_STATE_DEFAULT	0
#define GVE_MTD_PATH_STATE_TIMEOUT	1
#define GVE_MTD_PATH_STATE_CONGESTION	2
#define GVE_MTD_PATH_STATE_RETRANSMIT	3

#define GVE_MTD_PATH_HASH_NONE		(0x0 << 4)
#define GVE_MTD_PATH_HASH_L4		(0x1 << 4)
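
/* Illustrative sketch (not part of the upstream header): filling a
 * metadata descriptor that carries an L4 path hash for the packet.
 * The helper name is hypothetical.
 */
static inline void gve_example_fill_mtd_desc(struct gve_tx_mtd_desc *mtd,
					     u32 path_hash)
{
	mtd->type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd->path_state = GVE_MTD_PATH_STATE_DEFAULT | GVE_MTD_PATH_HASH_L4;
	mtd->path_hash = cpu_to_be32(path_hash);
	mtd->reserved0 = 0;
	mtd->reserved1 = 0;
}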

/* GVE Receive Packet Descriptor */
/* The start of an Ethernet packet comes 2 bytes into the rx buffer.
 * gVNIC adds this padding so that both the DMA and the L3/4 protocol
 * header accesses are aligned.
 */
#define GVE_RX_PAD 2
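
/* Illustrative usage (not part of the upstream header): the Ethernet
 * header therefore starts GVE_RX_PAD bytes into the receive buffer,
 * where `buf` is a hypothetical pointer to that buffer:
 *
 *	struct ethhdr *eth = (struct ethhdr *)(buf + GVE_RX_PAD);
 */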

struct gve_rx_desc {
	u8	padding[48];
	__be32	rss_hash;	/* Receive-side scaling hash (Toeplitz for gVNIC) */
	__be16	mss;
	__be16	reserved;	/* Reserved to zero */
	u8	hdr_len;	/* Header length (L2-L4) including padding */
	u8	hdr_off;	/* 64-byte-scaled offset into RX_DATA entry */
	__sum16	csum;		/* 1's-complement partial checksum of L3+ bytes */
	__be16	len;		/* Length of the received packet */
	__be16	flags_seq;	/* Flags [15:3] and sequence number [2:0] (1-7) */
} __packed;
static_assert(sizeof(struct gve_rx_desc) == 64);
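
/* Illustrative sketch (not part of the upstream header): hdr_off is
 * stored in 64-byte units, so the byte offset of the headers within the
 * RX_DATA entry is recovered as below. The helper name is hypothetical.
 */
static inline u32 gve_example_rx_hdr_byte_off(const struct gve_rx_desc *desc)
{
	return (u32)desc->hdr_off * 64;
}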

/* If the device supports raw DMA addressing then the addr in the data slot
 * is the DMA address of the buffer.
 * If the device only supports registered segments then the addr is a byte
 * offset into the registered segment (an ordered list of pages) where the
 * buffer is.
 */
union gve_rx_data_slot {
	__be64 qpl_offset;
	__be64 addr;
};
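
/* Illustrative sketch (not part of the upstream header): how the two
 * addressing modes would populate a data slot. The helper name and the
 * `raw_addressing` parameter are hypothetical.
 */
static inline void gve_example_fill_data_slot(union gve_rx_data_slot *slot,
					      bool raw_addressing,
					      dma_addr_t dma_addr,
					      u64 qpl_offset)
{
	if (raw_addressing)
		slot->addr = cpu_to_be64(dma_addr);
	else
		slot->qpl_offset = cpu_to_be64(qpl_offset);
}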

/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)

/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x)	cpu_to_be16(1 << (3 + (x)))
#define	GVE_RXF_FRAG		GVE_RXFLG(3)	/* IP Fragment			*/
#define	GVE_RXF_IPV4		GVE_RXFLG(4)	/* IPv4				*/
#define	GVE_RXF_IPV6		GVE_RXFLG(5)	/* IPv6				*/
#define	GVE_RXF_TCP		GVE_RXFLG(6)	/* TCP Packet			*/
#define	GVE_RXF_UDP		GVE_RXFLG(7)	/* UDP Packet			*/
#define	GVE_RXF_ERR		GVE_RXFLG(8)	/* Packet Error Detected	*/
#define	GVE_RXF_PKT_CONT	GVE_RXFLG(10)	/* Multi Fragment RX packet	*/
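
/* Illustrative sketch (not part of the upstream header): classifying a
 * received packet from its flags_seq word. The helper name is
 * hypothetical; driver code would consult GVE_RXF_UDP etc. the same way.
 */
static inline bool gve_example_rx_is_tcp(__be16 flags_seq)
{
	if (flags_seq & GVE_RXF_ERR)
		return false;
	return !!(flags_seq & GVE_RXF_TCP);
}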

/* GVE IRQ */
#define GVE_IRQ_ACK	BIT(31)
#define GVE_IRQ_MASK	BIT(30)
#define GVE_IRQ_EVENT	BIT(29)
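
/* Illustrative sketch (an assumption, not upstream code): the bits above
 * are written big-endian to a per-block IRQ doorbell, e.g. to ack an
 * interrupt and re-arm event notification. `irq_doorbell` is a
 * hypothetical iomem pointer.
 */
static inline void gve_example_irq_ack(void __iomem *irq_doorbell)
{
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
}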

/* The RSS hash reported in an rx descriptor is only meaningful for
 * unfragmented IPv4/IPv6 packets.
 */
static inline bool gve_needs_rss(__be16 flag)
{
	if (flag & GVE_RXF_FRAG)
		return false;
	if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return true;
	return false;
}
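
/* Illustrative usage (an assumption mirroring typical driver code),
 * where `rx_desc` and `skb` are hypothetical locals:
 *
 *	if (gve_needs_rss(rx_desc->flags_seq))
 *		skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
 *			     PKT_HASH_TYPE_L4);
 */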

/* RX sequence numbers cycle through 1..7 and wrap back to 1; 0 is skipped. */
static inline u8 gve_next_seqno(u8 seq)
{
	return (seq + 1) == 8 ? 1 : seq + 1;
}
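
/* Illustrative sketch (not part of the upstream header): walking the RX
 * ring in sequence-number order; a slot is ready once its seqno matches
 * the expected value. `ring`, `mask`, `cnt`, and the helper name are
 * hypothetical; a real driver also bounds this loop by its NAPI budget.
 */
static inline int gve_example_count_ready(const struct gve_rx_desc *ring,
					  u32 mask, u32 cnt, u8 seqno)
{
	int ready = 0;

	while (GVE_SEQNO(ring[cnt & mask].flags_seq) == seqno) {
		ready++;
		cnt++;
		seqno = gve_next_seqno(seqno);
	}
	return ready;
}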
#endif /* _GVE_DESC_H_ */