#ifndef __LIBCXGB_CM_H__
#define __LIBCXGB_CM_H__


#include <net/tcp.h>

#include <cxgb4.h>
#include <t4_msg.h>
#include <l2t.h>

void
cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		int *iptype, __u8 *local_ip, __u8 *peer_ip,
		__be16 *local_port, __be16 *peer_port);
struct dst_entry *
cxgb_find_route(struct cxgb4_lld_info *lldi,
		struct net_device *(*get_real_dev)(struct net_device *),
		__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos);
struct dst_entry *
cxgb_find_route6(struct cxgb4_lld_info *lldi,
		 struct net_device *(*get_real_dev)(struct net_device *),
		 __u8 *local_ip, __u8 *peer_ip, __be16 local_port,
		 __be16 peer_port, u8 tos, __u32 sin6_scope_id);

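/* Return true if an abort status is "negative advice": the hardware is
 * reporting a transient retransmit, persist or keepalive condition rather
 * than a fatal error, so the connection should not be torn down.
 */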
static inline bool cxgb_is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

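/* Find the best entry in the hardware MTU table for a path MTU of @mtu:
 * strip the IPv4/IPv6 and TCP header overhead (e.g. 40 bytes for IPv4,
 * plus TCPOLEN_TIMESTAMP rounded up to 12 bytes when @use_ts is set) and
 * ask cxgb4_best_aligned_mtu() for an entry whose TCP payload is 8-byte
 * aligned; the chosen index is returned through @idx.
 */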
static inline void
cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
	      unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

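/* Compute the TCP window-scale shift needed to represent a receive window
 * of @win in a 16-bit window field, capped at the RFC 7323 maximum of 14.
 * For example, a 1 MiB window (1048576) yields a wscale of 5, since
 * 65535 << 4 = 1048560 is still smaller than the window.
 */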
static inline u32 cxgb_compute_wscale(u32 win)
{
	u32 wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

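/* Build a CPL_TID_RELEASE message in @skb to return @tid to the hardware
 * TID table, and steer it to the setup-priority TX queue of channel @chan.
 * @len must be at least sizeof(struct cpl_tid_release).
 *
 * A minimal usage sketch (illustrative only; "ndev", "tid" and "chan" are
 * assumed to come from the calling ULP driver, not from this header):
 *
 *	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_tid_release),
 *					GFP_KERNEL);
 *	if (skb) {
 *		cxgb_mk_tid_release(skb, sizeof(struct cpl_tid_release),
 *				    tid, chan);
 *		cxgb4_ofld_send(ndev, skb);
 *	}
 */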
static inline void
cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
	struct cpl_tid_release *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
}

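/* Build a CPL_CLOSE_CON_REQ message in @skb to start a graceful (FIN)
 * close of connection @tid on channel @chan; @handler, with @handle as
 * its cookie, runs if address resolution for the frame fails.
 */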
static inline void
cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		      void *handle, arp_err_handler_t handler)
{
	struct cpl_close_con_req *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	t4_set_arp_err_handler(skb, handle, handler);
}

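/* Build a CPL_ABORT_REQ message in @skb asking the hardware to abort
 * connection @tid and send an RST to the peer (CPL_ABORT_SEND_RST).
 */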
static inline void
cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		  void *handle, arp_err_handler_t handler)
{
	struct cpl_abort_req *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->cmd = CPL_ABORT_SEND_RST;
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	t4_set_arp_err_handler(skb, handle, handler);
}

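/* Build a CPL_ABORT_RPL message in @skb acknowledging a peer-initiated
 * abort of @tid; CPL_ABORT_NO_RST tells the hardware no RST is needed,
 * since the peer has already reset the connection.
 */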
static inline void
cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
	struct cpl_abort_rpl *rpl;

	rpl = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl, tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->cmd = CPL_ABORT_NO_RST;
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
}

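/* Build a CPL_RX_DATA_ACK message in @skb returning receive-window
 * credits for @tid; @credit_dack carries the credit count and delayed-ACK
 * control bits, pre-encoded by the caller.
 */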
static inline void
cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		    u32 credit_dack)
{
	struct cpl_rx_data_ack *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
	req->credit_dack = cpu_to_be32(credit_dack);
	set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
}
#endif /* __LIBCXGB_CM_H__ */